From 5daa5f54fdcd2b5228add1a4c43a1897b2168f35 Mon Sep 17 00:00:00 2001
From: Bach Le
Date: Sun, 17 Dec 2023 18:57:33 +0800
Subject: [PATCH 01/84] Link to cublas dynamically on Windows even with
 LLAMA_STATIC (#4506)

---
 CMakeLists.txt | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 57b43c136..e3cd43ab3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -291,7 +291,12 @@ if (LLAMA_CUBLAS)
         add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${LLAMA_CUDA_PEER_MAX_BATCH_SIZE})
 
         if (LLAMA_STATIC)
-            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
+            if (WIN32)
+                # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
+                set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
+            else ()
+                set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
+            endif()
         else()
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
         endif()
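Patch 01's pattern — prefer static CUDA libraries, but fall back to the dynamic cuBLAS on platforms where the toolkit ships no static one — generalizes to any optional dependency. A minimal standalone sketch of the same conditional linking; the `demo` target and `DEMO_STATIC` option are hypothetical, not part of the patch:

```cmake
# Hypothetical standalone project showing the static/dynamic fallback.
cmake_minimum_required(VERSION 3.17)
project(demo LANGUAGES CXX)

find_package(CUDAToolkit REQUIRED) # provides the CUDA::* imported targets
option(DEMO_STATIC "Link CUDA runtime libraries statically" OFF)

add_executable(demo main.cpp)

if (DEMO_STATIC)
    if (WIN32)
        # No cublas_static in the Windows toolkit: mix static cudart with dynamic cuBLAS.
        target_link_libraries(demo PRIVATE CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
    else()
        target_link_libraries(demo PRIVATE CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
    endif()
else()
    target_link_libraries(demo PRIVATE CUDA::cudart CUDA::cublas CUDA::cublasLt)
endif()
```

Keeping the platform check next to the link command, as the patch does, means a future static Windows cuBLAS only requires deleting the `WIN32` branch.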
From 62bd52b7bf90819e75f427a95a484cd5eee0b3c7 Mon Sep 17 00:00:00 2001
From: mzcu
Date: Sun, 17 Dec 2023 15:54:37 +0100
Subject: [PATCH 02/84] server : allow requests larger than 8K (#4500)

---
 examples/server/server.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 5f93dcb66..a9f8b3747 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -10,7 +10,8 @@
 // crash the server in debug mode, otherwise send an http 500 error
 #define CPPHTTPLIB_NO_EXCEPTIONS 1
 #endif
-
+// increase max payload length to allow use of larger context size
+#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
 #include "httplib.h"
 #include "json.hpp"

From eb16dae7e70ca97396190698b29c0f9ee3388e88 Mon Sep 17 00:00:00 2001
From: Alexey Parfenov
Date: Sun, 17 Dec 2023 14:56:09 +0000
Subject: [PATCH 03/84] server : fix possible ambiguity in content type
 charset (#4501)

---
 examples/server/server.cpp | 44 +++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index a9f8b3747..be7b5b95e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2699,7 +2699,7 @@ int main(int argc, char **argv)
         }

         // API key is invalid or not provided
-        res.set_content("Unauthorized: Invalid API Key", "text/plain");
+        res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
         res.status = 401; // Unauthorized

         LOG_WARNING("Unauthorized: Invalid API Key", {});
@@ -2714,28 +2714,28 @@ int main(int argc, char **argv)
     // this is only called if no index.html is found in the public --path
     svr.Get("/", [](const httplib::Request &, httplib::Response &res)
             {
-                res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html");
+                res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html; charset=utf-8");
                 return false;
             });

     // this is only called if no index.js is found in the public --path
     svr.Get("/index.js", [](const httplib::Request &, httplib::Response &res)
             {
-                res.set_content(reinterpret_cast<const char*>(&index_js), index_js_len, "text/javascript");
+                res.set_content(reinterpret_cast<const char*>(&index_js), index_js_len, "text/javascript; charset=utf-8");
                 return false;
             });

     // this is only called if no index.html is found in the public --path
     svr.Get("/completion.js", [](const httplib::Request &, httplib::Response &res)
             {
-                res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript");
+                res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript; charset=utf-8");
                 return false;
             });

     // this is only called if no index.html is found in the public --path
     svr.Get("/json-schema-to-grammar.mjs", [](const httplib::Request &, httplib::Response &res)
             {
-                res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript");
+                res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript; charset=utf-8");
                 return false;
             });

@@ -2746,7 +2746,7 @@ int main(int argc, char **argv)
             { "user_name", llama.name_user.c_str() },
             { "assistant_name", llama.name_assistant.c_str() }
         };
-        res.set_content(data.dump(), "application/json");
+        res.set_content(data.dump(), "application/json; charset=utf-8");
     });

     svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
@@ -2760,12 +2760,12 @@ int main(int argc, char **argv)
             std::string completion_text;
             task_result result = llama.next_result(task_id);
             if (!result.error && result.stop) {
-                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json");
+                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
             } else {
                 res.status = 404;
-                res.set_content(result.result_json["content"], "text/plain");
+                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
                 return;
             }
         } else {
@@ -2836,7 +2836,7 @@ int main(int argc, char **argv)
             }}
         };
-        res.set_content(models.dump(), "application/json");
+        res.set_content(models.dump(), "application/json; charset=utf-8");
     });

     // TODO: add mount point without "/v1" prefix -- how?
@@ -2858,10 +2858,10 @@ int main(int argc, char **argv)
                 res.set_content(oaicompat_result.dump(-1, ' ', false, json::error_handler_t::replace),
-                                "application/json");
+                                "application/json; charset=utf-8");
             } else {
                 res.status = 500;
-                res.set_content(result.result_json["content"], "text/plain");
+                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
                 return;
             }
         } else {
@@ -2925,12 +2925,12 @@ int main(int argc, char **argv)
             task_result result = llama.next_result(task_id);
             if (!result.error && result.stop) {
-                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json");
+                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
             } else {
                 res.status = 404;
-                res.set_content(result.result_json["content"], "text/plain");
+                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
                 return;
             }
         } else {
@@ -2979,11 +2979,11 @@ int main(int argc, char **argv)
     svr.Get("/model.json", [&llama](const httplib::Request &, httplib::Response &res)
             {
                 const json data = llama.get_model_props();
-                return res.set_content(data.dump(), "application/json");
+                return res.set_content(data.dump(), "application/json; charset=utf-8");
             });

     svr.Options(R"(/.*)", [](const httplib::Request &, httplib::Response &res)
-            { return res.set_content("", "application/json"); });
+            { return res.set_content("", "application/json; charset=utf-8"); });

     svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
             {
                 tokens = llama.tokenize(body["content"], false);
             }
             const json data = format_tokenizer_response(tokens);
-            return res.set_content(data.dump(), "application/json");
+            return res.set_content(data.dump(), "application/json; charset=utf-8");
         });

     svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
             {
             }
             const json data = format_detokenized_response(content);
-            return res.set_content(data.dump(), "application/json");
+            return res.set_content(data.dump(), "application/json; charset=utf-8");
         });

     svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
             {
             }
             const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
             task_result result = llama.next_result(task_id);
-            return res.set_content(result.result_json.dump(), "application/json");
+            return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
         });

     svr.set_logger(log_server_request);
         {
             snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
         }
-        res.set_content(buf, "text/plain");
+        res.set_content(buf, "text/plain; charset=utf-8");
         res.status = 500;
     });
         {
             if (res.status == 401) {
-                res.set_content("Unauthorized", "text/plain");
+                res.set_content("Unauthorized", "text/plain; charset=utf-8");
             }
             if (res.status == 400) {
-                res.set_content("Invalid request", "text/plain");
+                res.set_content("Invalid request", "text/plain; charset=utf-8");
             } else if (res.status == 404) {
-                res.set_content("File Not Found", "text/plain");
+                res.set_content("File Not Found", "text/plain; charset=utf-8");
                 res.status = 404;
             }
         });
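Patch 03 appends an explicit `charset=utf-8` to every response so clients cannot mis-guess the encoding of `text/plain` or JSON bodies. One way to keep such MIME strings from drifting apart again is to centralize them; the sketch below assumes cpp-httplib as used by the server, but the `MIMETYPE_*` constants and routes are illustrative, not from server.cpp:

```cpp
#include "httplib.h"

// Sketch: centralizing MIME strings so the charset cannot be forgotten
// in any individual handler. Names are illustrative, not from the patch.
static const char * MIMETYPE_JSON = "application/json; charset=utf-8";
static const char * MIMETYPE_TEXT = "text/plain; charset=utf-8";

int main() {
    httplib::Server svr;
    svr.Get("/health", [](const httplib::Request &, httplib::Response & res) {
        res.set_content("{\"status\":\"ok\"}", MIMETYPE_JSON);
    });
    svr.Get("/ping", [](const httplib::Request &, httplib::Response & res) {
        res.set_content("pong", MIMETYPE_TEXT);
    });
    svr.listen("127.0.0.1", 8080);
}
```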
From 8edd2b40fdbcafbf630f2cf29306b29d5cb48c42 Mon Sep 17 00:00:00 2001
From: AdithyanI
Date: Sun, 17 Dec 2023 15:57:56 +0100
Subject: [PATCH 04/84] server : fix grammar being ignored (#4494)

Fix bug in identifying the grammar.
---
 examples/server/server.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index be7b5b95e..c97efe97d 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2414,7 +2414,7 @@ json oaicompat_completion_params_parse(
     llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
     llama_params["tfs_z"] = json_value(body, "tfs_z", 0.0);

-    if (llama_params.count("grammar") != 0) {
+    if (body.count("grammar") != 0) {
         llama_params["grammar"] = json_value(body, "grammar", json::object());
     }
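The one-line fix in patch 04 is worth dwelling on: the old code asked the destination map `llama_params` whether a `grammar` key existed, instead of asking the incoming request `body`, so the check could never succeed and user-supplied grammars were silently dropped. A standalone reproduction of the pitfall with nlohmann::json (not server code):

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
    json body = { {"grammar", "root ::= \"yes\" | \"no\""} }; // incoming request
    json params;                                              // map being built

    // Buggy: checks the map being built, which cannot contain the key yet.
    if (params.count("grammar") != 0) {
        params["grammar"] = body["grammar"];
    }
    std::cout << "buggy: " << params.dump() << "\n"; // prints {}

    // Fixed: check the request body, i.e. the source of the data.
    if (body.count("grammar") != 0) {
        params["grammar"] = body["grammar"];
    }
    std::cout << "fixed: " << params.dump() << "\n"; // now contains the grammar
}
```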
From 0ffc92d2d23a789625f018840469af045be1e3c0 Mon Sep 17 00:00:00 2001
From: olexiyb
Date: Sun, 17 Dec 2023 17:02:16 +0200
Subject: [PATCH 05/84] server : disable llm logs if SERVER_VERBOSE is off
 (#3792)

---
 examples/server/server.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c97efe97d..04038530f 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2645,6 +2645,9 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
 int main(int argc, char **argv)
 {
+#if SERVER_VERBOSE != 1
+    log_disable();
+#endif
     // own arguments required by this example
     gpt_params params;
     server_params sparams;

From 45668633fdb522a925c3dafc1ecf426f539efb27 Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 17 Dec 2023 16:05:56 +0100
Subject: [PATCH 06/84] finetune : keep allocs alive until all allocations are
 done (#4486)

---
 examples/finetune/finetune.cpp | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index b9849e8c9..6a668d764 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1620,8 +1620,6 @@ int main(int argc, char ** argv) {
     opt->params.adam.gclip = params.common.adam_gclip;
     opt->params.adam.eps_f = params.common.adam_eps_f;

-    ggml_allocr * alloc = NULL;
-
     printf("%s: init model\n", __func__);
     bool existed = load_checkpoint_lora_file(params.common.fn_checkpoint_in, &model, &lora, train);
@@ -1725,10 +1723,9 @@ int main(int argc, char ** argv) {

     // allocate input tensors
     mem_input_data.resize(max_input_size);
-    alloc = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
-    ggml_allocr_alloc(alloc, tokens_input);
-    ggml_allocr_alloc(alloc, target_probs);
-    ggml_allocr_free(alloc);
+    ggml_allocr_t alloc_inps = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
+    ggml_allocr_alloc(alloc_inps, tokens_input);
+    ggml_allocr_alloc(alloc_inps, target_probs);

     // context for compute tensors without their data
     const size_t estimated_compute_size_wo_data = (
@@ -1755,7 +1752,7 @@ int main(int argc, char ** argv) {
     // find best evaluation order
     for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
         ctx_compute = ggml_init(ctx_compute_params);
-        alloc = ggml_allocr_new_measure(tensor_alignment);
+        ggml_allocr_t alloc = ggml_allocr_new_measure(tensor_alignment);
         gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
         gf->order = (enum ggml_cgraph_eval_order) order;
         gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
@@ -1788,7 +1785,7 @@ int main(int argc, char ** argv) {
     // allocate compute tensors
     mem_compute_data.resize(max_compute_size);
     ctx_compute = ggml_init(ctx_compute_params);
-    alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
+    ggml_allocr_t alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
     gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
     gf->order = best_order;
     gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
@@ -1804,6 +1801,8 @@ int main(int argc, char ** argv) {
         params.common.use_checkpointing
     );
     ggml_allocr_free(alloc);
+    ggml_allocr_free(alloc_inps);
+
     // tokenize data
     std::vector<llama_token> train_tokens;
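Patch 06's point is lifetime, not allocation strategy: the input-tensor allocator used to be freed as soon as the inputs were placed, while later compute-tensor allocation still depended on those placements. A hedged sketch of the discipline the patch enforces, using only the ggml-alloc calls visible in the diff; the function, buffers, and tensors below are illustrative:

```c
#include "ggml.h"
#include "ggml-alloc.h"

// Sketch: allocate input and compute tensors from separate buffers, keeping
// every allocator alive until all allocations are done (the point of patch 06).
static void alloc_graph_tensors(struct ggml_tensor * tokens_input,
                                struct ggml_tensor * target_probs,
                                struct ggml_tensor * compute_tensor,
                                void * inp_buf, size_t inp_size,
                                void * cmp_buf, size_t cmp_size,
                                size_t alignment) {
    ggml_allocr_t alloc_inps = ggml_allocr_new(inp_buf, inp_size, alignment);
    ggml_allocr_alloc(alloc_inps, tokens_input);
    ggml_allocr_alloc(alloc_inps, target_probs);

    // The compute allocation phase still relies on the input placements,
    // so alloc_inps must not be freed yet.
    ggml_allocr_t alloc = ggml_allocr_new(cmp_buf, cmp_size, alignment);
    ggml_allocr_alloc(alloc, compute_tensor);

    // Free only after every allocation has been made, as in the patched finetune.cpp.
    ggml_allocr_free(alloc);
    ggml_allocr_free(alloc_inps);
}
```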
From 919c40660fd27157b391b5832d2a577d5afef4cb Mon Sep 17 00:00:00 2001
From: Matheus Gabriel Alves Silva
Date: Sun, 17 Dec 2023 12:23:33 -0300
Subject: [PATCH 07/84] build : Check the ROCm installation location (#4485)

* build : Check the ROCm installation location

* more generic approach

* fixup! It was returning the path instead of the command output

* fixup! Trailing whitespace

---
 Makefile | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/Makefile b/Makefile
index fb775ae5b..8273f8400 100644
--- a/Makefile
+++ b/Makefile
@@ -439,9 +439,15 @@ ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
 endif # LLAMA_CLBLAST

 ifdef LLAMA_HIPBLAS
-	ROCM_PATH   ?= /opt/rocm
-	HIPCC       ?= $(ROCM_PATH)/bin/hipcc
-	GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
+
+	ifeq ($(wildcard /opt/rocm),)
+		ROCM_PATH   ?= /usr
+		GPU_TARGETS ?= $(shell $(shell which amdgpu-arch))
+	else
+		ROCM_PATH   ?= /opt/rocm
+		GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
+	endif
+	HIPCC ?= $(ROCM_PATH)/bin/hipcc
 	LLAMA_CUDA_DMMV_X       ?= 32
 	LLAMA_CUDA_MMV_Y        ?= 1
 	LLAMA_CUDA_KQUANTS_ITER ?= 2

From f7f468a97dceec2f8fe8b1ed7a2091083446ebc7 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Sun, 17 Dec 2023 10:45:46 -0500
Subject: [PATCH 08/84] gguf-py : fail fast on nonsensical special token IDs
 (#4489)

---
 gguf-py/gguf/vocab.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py
index de3e5edb5..76924d8f2 100644
--- a/gguf-py/gguf/vocab.py
+++ b/gguf-py/gguf/vocab.py
@@ -109,8 +109,10 @@ class SpecialVocab:
         return True

     def _set_special_token(self, typ: str, tid: Any) -> None:
-        if not isinstance(tid, int) or tid < 0:
+        if not isinstance(tid, int):
             return
+        if tid < 0:
+            raise ValueError(f'invalid value for special token type {typ}: {tid}')
         if self.n_vocab is None or tid < self.n_vocab:
             if typ in self.special_token_ids:
                 return
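Patch 08 separates two cases that the old guard conflated: a non-integer ID is simply skipped, while a negative integer can only come from a corrupt or mis-parsed tokenizer config and now raises. A standalone sketch of the same fail-fast validation; `validate_special_token_id` is a hypothetical helper, not the gguf-py API:

```python
from typing import Any, Optional

def validate_special_token_id(typ: str, tid: Any, n_vocab: Optional[int]) -> Optional[int]:
    # Non-integers (e.g. a missing field parsed as None) are silently ignored,
    # matching the patched _set_special_token behaviour.
    if not isinstance(tid, int):
        return None
    # A negative ID cannot come from a well-formed tokenizer config: fail fast.
    if tid < 0:
        raise ValueError(f'invalid value for special token type {typ}: {tid}')
    # Out-of-range IDs are rejected only when the vocab size is known.
    if n_vocab is not None and tid >= n_vocab:
        return None
    return tid

print(validate_special_token_id('bos', 1, 32000))    # 1
print(validate_special_token_id('eos', None, None))  # None (skipped)
# validate_special_token_id('pad', -1, None) would raise ValueError
```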
From 800a489e4a8be199122259a995b1ee9dd7fae320 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 17 Dec 2023 19:38:41 +0200
Subject: [PATCH 09/84] llama.swiftui : add bench functionality (#4483)

* llama.swiftui : add bench button

* llama.swiftui : initial bench functionality

* force to use n_gpu_layers on simulator

* add download buttons & expose llamaState.loadModel

* update project.pbxproj

* comment #Preview & fix editorconfig check

* gitignore : xcode stuff

* llama.swiftui : UX improvements

* llama.swiftui : avoid data copy via "downloadTask"

* llama.swiftui : remove model from project

* llama : remove "mostly" from model infos

* llama.swiftui : improve bench

---------

Co-authored-by: jhen
---
 .editorconfig                               |   3 +
 examples/llama.swiftui/.gitignore           |   1 +
 .../llama.cpp.swift/LibLlama.swift          | 182 +++-
 .../llama.swiftui.xcodeproj/project.pbxproj | 898 +++++++++---------
 .../llama.swiftui/Models/LlamaState.swift   |  52 +-
 .../llama.swiftui/UI/ContentView.swift      | 114 ++-
 .../llama.swiftui/UI/DownloadButton.swift   | 122 +++
 llama.cpp                                   |  33 +-
 8 files changed, 895 insertions(+), 510 deletions(-)
 create mode 100644 examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift

diff --git a/.editorconfig b/.editorconfig
index a56e9ccc8..16d16b3b5 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -23,3 +23,6 @@ insert_final_newline = unset

 [examples/server/public/*]
 indent_size = 2
+
+[examples/llama.swiftui/llama.swiftui.xcodeproj/*]
+indent_style = tab

diff --git a/examples/llama.swiftui/.gitignore b/examples/llama.swiftui/.gitignore
index 9bce6af39..e585a2a4f 100644
--- a/examples/llama.swiftui/.gitignore
+++ b/examples/llama.swiftui/.gitignore
@@ -1 +1,2 @@
 xcuserdata
+xcshareddata

diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
index 3754f0551..272e1fd8a 100644
--- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
+++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
@@ -6,16 +6,34 @@ enum LlamaError: Error {
     case couldNotInitializeContext
 }

+func llama_batch_clear(_ batch: inout llama_batch) {
+    batch.n_tokens = 0
+}
+
+func llama_batch_add(_ batch: inout llama_batch, _ id: llama_token, _ pos: llama_pos, _ seq_ids: [llama_seq_id], _ logits: Bool) {
+    batch.token [Int(batch.n_tokens)] = id
+    batch.pos [Int(batch.n_tokens)] = pos
+    batch.n_seq_id[Int(batch.n_tokens)] = Int32(seq_ids.count)
+    for i in 0.. LlamaContext {
         llama_backend_init(false)
-        let model_params = llama_model_default_params()
+        var model_params = llama_model_default_params()
+#if targetEnvironment(simulator)
+        model_params.n_gpu_layers = 0
+        print("Running on simulator, force use n_gpu_layers = 0")
+#endif
         let model = llama_load_model_from_file(path, model_params)
         guard let model else {
             print("Could not load model at \(path)")
             throw LlamaError.couldNotInitializeContext
         }
+
+        let n_threads = max(1, min(8, ProcessInfo.processInfo.processorCount - 2))
+        print("Using \(n_threads) threads")
+
         var ctx_params = llama_context_default_params()
-        ctx_params.seed = 1234
+        ctx_params.seed = 1234
         ctx_params.n_ctx = 2048
-        ctx_params.n_threads = 8
-        ctx_params.n_threads_batch = 8
+        ctx_params.n_threads = UInt32(n_threads)
+        ctx_params.n_threads_batch = UInt32(n_threads)

         let context = llama_new_context_with_model(model, ctx_params)
         guard let context else {
@@ -56,6 +83,26 @@ actor LlamaContext {
         return LlamaContext(model: model, context: context)
     }

+    func model_info() -> String {
+        let result = UnsafeMutablePointer<Int8>.allocate(capacity: 256)
+        result.initialize(repeating: Int8(0), count: 256)
+        defer {
+            result.deallocate()
+        }
+
+        // TODO: this is probably very stupid way to get the string from C
+
+        let nChars = llama_model_desc(model, result, 256)
+        let bufferPointer = UnsafeBufferPointer(start: result, count: Int(nChars))
+
+        var SwiftString = ""
+        for char in bufferPointer {
+            SwiftString.append(Character(UnicodeScalar(UInt8(char))))
+        }
+
+        return SwiftString
+    }
+
     func get_n_tokens() -> Int32 {
         return batch.n_tokens;
     }
@@ -79,16 +126,11 @@ actor LlamaContext {
             print(String(cString: token_to_piece(token: id) + [0]))
         }

-        // batch = llama_batch_init(512, 0) // done in init()
-        batch.n_tokens = Int32(tokens_list.count)
+        llama_batch_clear(&batch)

-        for i1 in 0.. String {
+        var pp_avg: Double = 0
+        var tg_avg: Double = 0
+
+        var pp_std: Double = 0
+        var tg_std: Double = 0
+
+        for r in 0.. 1 {
+            pp_std = sqrt(pp_std / Double(nr - 1) - pp_avg * pp_avg * Double(nr) / Double(nr - 1))
+            tg_std = sqrt(tg_std / Double(nr - 1) - tg_avg * tg_avg * Double(nr) / Double(nr - 1))
+        } else {
+            pp_std = 0
+            tg_std = 0
+        }
+
+        let model_desc = model_info();
+        let model_size = String(format: "%.2f GiB", Double(llama_model_size(model)) / 1024.0 / 1024.0 / 1024.0);
+        let model_n_params = String(format: "%.2f B", Double(llama_model_n_params(model)) / 1e9);
+        let backend = "Metal";
+        let pp_avg_str = String(format: "%.2f", pp_avg);
+        let tg_avg_str = String(format: "%.2f", tg_avg);
+        let pp_std_str = String(format: "%.2f", pp_std);
+        let tg_std_str = String(format: "%.2f", tg_std);
+
+        var result = ""
+
+        result += String("| model | size | params | backend | test | t/s |\n")
+        result += String("| --- | --- | --- | --- | --- | --- |\n")
+        result += String("| \(model_desc) | \(model_size) | \(model_n_params) | \(backend) | pp \(pp) | \(pp_avg_str) ± \(pp_std_str) |\n")
+        result += String("| \(model_desc) | \(model_size) | \(model_n_params) | \(backend) | tg \(tg) | \(tg_avg_str) ± \(tg_std_str) |\n")
+
+        return result;
+    }
+
     func clear() {
         tokens_list.removeAll()
         temporary_invalid_cchars.removeAll()
+        llama_kv_cache_clear(context)
     }

     private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
         let utf8Count = text.utf8.count
-        let n_tokens = utf8Count + (add_bos ? 1 : 0)
+        let n_tokens = utf8Count + (add_bos ? 1 : 0) + 1
         let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
         let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, false)
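The `bench(pp:tg:pl:nr:)` method added above returns its measurements as a markdown table (prompt-processing and text-generation speeds, with mean ± stddev over `nr` runs). A hedged sketch of how app code might drive it; `create_context` and `bench` are from the diff, while `runBench` and the model path are placeholders:

```swift
import Foundation

// Sketch: exercising the benchmark added in this patch. `runBench` and the
// model path are placeholders; a real app would obtain the path from its UI.
func runBench(modelPath: String) async {
    do {
        let ctx = try LlamaContext.create_context(path: modelPath)
        // pp = prompt tokens, tg = generated tokens, pl = parallel sequences,
        // nr = repetitions feeding the mean/stddev columns of the table.
        let report = await ctx.bench(pp: 512, tg: 128, pl: 1, nr: 3)
        print(report) // markdown: | model | size | params | backend | test | t/s |
    } catch {
        print("bench failed: \(error)")
    }
}
```

Because `LlamaContext` is an actor, the `bench` call must be awaited, which also keeps the long-running measurement off the main thread.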
diff --git a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj
index bc1fd15ce..2e6159928 100644
--- a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj
+++ b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj
@@ -1,481 +1,483 @@
 // !$*UTF8*$!
{ - archiveVersion = 1; - classes = { - }; - objectVersion = 56; - objects = { + archiveVersion = 1; + classes = { + }; + objectVersion = 56; + objects = { /* Begin PBXBuildFile section */ - 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 542376072B0D9BFB008E6A1C /* ggml-quants.c */; }; - 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */; }; - 542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */ = {isa = PBXBuildFile; fileRef = 549479C82AC9E10B00E0F78B /* ggml-metal.metal */; }; - 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09B2AC8723900A8AEE9 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE -DGGML_USE_METAL -DGGML_USE_K_QUANTS -O3"; }; }; - 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */; }; - 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 542EA0A12AC8729100A8AEE9 /* llama.cpp */; settings = {COMPILER_FLAGS = "-DGGML_USE_K_QUANTS -DGGML_USE_METAL -O3"; }; }; - 549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; }; - 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */ = {isa = PBXBuildFile; fileRef = 549479C52AC9E0F200E0F78B /* ggml-metal.m */; settings = {COMPILER_FLAGS = "-fno-objc-arc -DGGML_SWIFT -DGGML_USE_METAL -O3"; }; }; - 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */; }; - 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83782AC328BD0096AF73 /* ContentView.swift */; }; - 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */; }; - 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */; }; - 8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8A39BE092AC7601000BFEB40 /* Accelerate.framework */; }; - 8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; }; - 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; }; - 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; }; + 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 542376072B0D9BFB008E6A1C /* ggml-quants.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; + 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; + 542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */ = {isa = PBXBuildFile; fileRef = 549479C82AC9E10B00E0F78B /* ggml-metal.metal */; }; + 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09B2AC8723900A8AEE9 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE -DGGML_USE_METAL -DGGML_USE_K_QUANTS -O3"; }; }; + 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; + 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 542EA0A12AC8729100A8AEE9 /* llama.cpp */; settings = {COMPILER_FLAGS = "-DGGML_USE_K_QUANTS -DGGML_USE_METAL -O3"; }; }; + 549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; }; + 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */ = {isa = PBXBuildFile; fileRef = 549479C52AC9E0F200E0F78B /* ggml-metal.m */; settings = {COMPILER_FLAGS = "-fno-objc-arc -DGGML_SWIFT -DGGML_USE_METAL -O3"; }; }; + 7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */; }; + 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */; }; + 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83782AC328BD0096AF73 /* ContentView.swift */; }; + 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */; }; + 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */; }; + 8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8A39BE092AC7601000BFEB40 /* Accelerate.framework */; }; + 8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; }; + 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; }; + 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference section */ - 542376062B0D9BEA008E6A1C /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-quants.h"; path = "../../ggml-quants.h"; sourceTree = ""; }; - 542376072B0D9BFB008E6A1C /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-quants.c"; path = "../../ggml-quants.c"; sourceTree = ""; }; - 542376092B0D9C40008E6A1C /* ggml-backend.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "ggml-backend.h"; path = "../../ggml-backend.h"; sourceTree = ""; }; - 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-backend.c"; path = "../../ggml-backend.c"; sourceTree = ""; }; - 542EA09B2AC8723900A8AEE9 /* ggml.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ggml.c; path = ../../ggml.c; sourceTree = ""; }; - 542EA09C2AC8723900A8AEE9 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ggml.h; path = ../../ggml.h; sourceTree = ""; }; - 542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-alloc.h"; path = "../../ggml-alloc.h"; sourceTree = ""; }; - 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType 
= sourcecode.c.c; name = "ggml-alloc.c"; path = "../../ggml-alloc.c"; sourceTree = ""; }; - 542EA0A12AC8729100A8AEE9 /* llama.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = llama.cpp; path = ../../llama.cpp; sourceTree = ""; }; - 542EA0A22AC8729100A8AEE9 /* llama.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = llama.h; path = ../../llama.h; sourceTree = ""; }; - 549479C52AC9E0F200E0F78B /* ggml-metal.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "ggml-metal.m"; path = "../../ggml-metal.m"; sourceTree = ""; }; - 549479C62AC9E0F200E0F78B /* ggml-metal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-metal.h"; path = "../../ggml-metal.h"; sourceTree = ""; }; - 549479C82AC9E10B00E0F78B /* ggml-metal.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; name = "ggml-metal.metal"; path = "../../ggml-metal.metal"; sourceTree = ""; }; - 549479CA2AC9E16000E0F78B /* Metal.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Metal.framework; path = System/Library/Frameworks/Metal.framework; sourceTree = SDKROOT; }; - 8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "bridging-header.h"; sourceTree = ""; }; - 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = llama.swiftui.app; sourceTree = BUILT_PRODUCTS_DIR; }; - 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = llama_swiftuiApp.swift; sourceTree = ""; }; - 8A1C83782AC328BD0096AF73 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = ""; }; - 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; - 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; - 8A39BE092AC7601000BFEB40 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; }; - 8A3F841F2AC4C824005E2EE8 /* llama-2-7b-chat.Q2_K.gguf */ = {isa = PBXFileReference; lastKnownFileType = file; path = "llama-2-7b-chat.Q2_K.gguf"; sourceTree = ""; }; - 8A3F84232AC4C891005E2EE8 /* models */ = {isa = PBXFileReference; lastKnownFileType = folder; name = models; path = llama.swiftui/Resources/models; sourceTree = ""; }; - 8A907F322AC7134E006146EA /* LibLlama.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibLlama.swift; sourceTree = ""; }; - 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LlamaState.swift; sourceTree = ""; }; + 542376062B0D9BEA008E6A1C /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-quants.h"; path = "../../ggml-quants.h"; sourceTree = ""; }; + 542376072B0D9BFB008E6A1C /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = 
"ggml-quants.c"; path = "../../ggml-quants.c"; sourceTree = ""; }; + 542376092B0D9C40008E6A1C /* ggml-backend.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "ggml-backend.h"; path = "../../ggml-backend.h"; sourceTree = ""; }; + 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-backend.c"; path = "../../ggml-backend.c"; sourceTree = ""; }; + 542EA09B2AC8723900A8AEE9 /* ggml.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ggml.c; path = ../../ggml.c; sourceTree = ""; }; + 542EA09C2AC8723900A8AEE9 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ggml.h; path = ../../ggml.h; sourceTree = ""; }; + 542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-alloc.h"; path = "../../ggml-alloc.h"; sourceTree = ""; }; + 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-alloc.c"; path = "../../ggml-alloc.c"; sourceTree = ""; }; + 542EA0A12AC8729100A8AEE9 /* llama.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = llama.cpp; path = ../../llama.cpp; sourceTree = ""; }; + 542EA0A22AC8729100A8AEE9 /* llama.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = llama.h; path = ../../llama.h; sourceTree = ""; }; + 549479C52AC9E0F200E0F78B /* ggml-metal.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "ggml-metal.m"; path = "../../ggml-metal.m"; sourceTree = ""; }; + 549479C62AC9E0F200E0F78B /* ggml-metal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-metal.h"; path = "../../ggml-metal.h"; sourceTree = ""; }; + 549479C82AC9E10B00E0F78B /* ggml-metal.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; name = "ggml-metal.metal"; path = "../../ggml-metal.metal"; sourceTree = ""; }; + 549479CA2AC9E16000E0F78B /* Metal.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Metal.framework; path = System/Library/Frameworks/Metal.framework; sourceTree = SDKROOT; }; + 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DownloadButton.swift; sourceTree = ""; }; + 8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "bridging-header.h"; sourceTree = ""; }; + 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = llama.swiftui.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = llama_swiftuiApp.swift; sourceTree = ""; }; + 8A1C83782AC328BD0096AF73 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = ""; }; + 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType 
= folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; + 8A39BE092AC7601000BFEB40 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; }; + 8A3F84232AC4C891005E2EE8 /* models */ = {isa = PBXFileReference; lastKnownFileType = folder; name = models; path = llama.swiftui/Resources/models; sourceTree = ""; }; + 8A907F322AC7134E006146EA /* LibLlama.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibLlama.swift; sourceTree = ""; }; + 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LlamaState.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ - 8A1C83702AC328BD0096AF73 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */, - 8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; + 8A1C83702AC328BD0096AF73 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */, + 8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ - 8A08D1F62AC7383900FE6CD4 /* llama.cpp */ = { - isa = PBXGroup; - children = ( - 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */, - 542376092B0D9C40008E6A1C /* ggml-backend.h */, - 542376062B0D9BEA008E6A1C /* ggml-quants.h */, - 542376072B0D9BFB008E6A1C /* ggml-quants.c */, - 549479C82AC9E10B00E0F78B /* ggml-metal.metal */, - 549479C62AC9E0F200E0F78B /* ggml-metal.h */, - 549479C52AC9E0F200E0F78B /* ggml-metal.m */, - 542EA09B2AC8723900A8AEE9 /* ggml.c */, - 542EA09C2AC8723900A8AEE9 /* ggml.h */, - 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */, - 542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */, - 542EA0A12AC8729100A8AEE9 /* llama.cpp */, - 542EA0A22AC8729100A8AEE9 /* llama.h */, - ); - name = llama.cpp; - sourceTree = ""; - }; - 8A1C836A2AC328BD0096AF73 = { - isa = PBXGroup; - children = ( - 8A08D1F62AC7383900FE6CD4 /* llama.cpp */, - 8A907F312AC7134E006146EA /* llama.cpp.swift */, - 8A3F84232AC4C891005E2EE8 /* models */, - 8A1C83752AC328BD0096AF73 /* llama.swiftui */, - 8A1C83742AC328BD0096AF73 /* Products */, - 8A39BE082AC7601000BFEB40 /* Frameworks */, - ); - sourceTree = ""; - }; - 8A1C83742AC328BD0096AF73 /* Products */ = { - isa = PBXGroup; - children = ( - 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */, - ); - name = Products; - sourceTree = ""; - }; - 8A1C83752AC328BD0096AF73 /* llama.swiftui */ = { - isa = PBXGroup; - children = ( - 8A3F84102AC4BD85005E2EE8 /* Resources */, - 8A9F7C4B2AC332DC008AE1EA /* Models */, - 8A9F7C4A2AC332BF008AE1EA /* UI */, - 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */, - 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */, - 8A1C837C2AC328BE0096AF73 /* Preview Content */, - ); - path = llama.swiftui; - sourceTree = ""; - }; - 8A1C837C2AC328BE0096AF73 /* Preview Content */ = { - isa = PBXGroup; - children = ( - 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */, - ); - path = "Preview Content"; - sourceTree = ""; - }; - 8A39BE082AC7601000BFEB40 /* Frameworks */ = { - isa = PBXGroup; - children = ( - 
549479CA2AC9E16000E0F78B /* Metal.framework */, - 8A39BE092AC7601000BFEB40 /* Accelerate.framework */, - ); - name = Frameworks; - sourceTree = ""; - }; - 8A3F84102AC4BD85005E2EE8 /* Resources */ = { - isa = PBXGroup; - children = ( - 8A3F84112AC4BD8C005E2EE8 /* models */, - ); - path = Resources; - sourceTree = ""; - }; - 8A3F84112AC4BD8C005E2EE8 /* models */ = { - isa = PBXGroup; - children = ( - 8A3F841F2AC4C824005E2EE8 /* llama-2-7b-chat.Q2_K.gguf */, - ); - path = models; - sourceTree = ""; - }; - 8A907F312AC7134E006146EA /* llama.cpp.swift */ = { - isa = PBXGroup; - children = ( - 8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */, - 8A907F322AC7134E006146EA /* LibLlama.swift */, - ); - path = llama.cpp.swift; - sourceTree = ""; - }; - 8A9F7C4A2AC332BF008AE1EA /* UI */ = { - isa = PBXGroup; - children = ( - 8A1C83782AC328BD0096AF73 /* ContentView.swift */, - ); - path = UI; - sourceTree = ""; - }; - 8A9F7C4B2AC332DC008AE1EA /* Models */ = { - isa = PBXGroup; - children = ( - 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */, - ); - path = Models; - sourceTree = ""; - }; + 8A08D1F62AC7383900FE6CD4 /* llama.cpp */ = { + isa = PBXGroup; + children = ( + 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */, + 542376092B0D9C40008E6A1C /* ggml-backend.h */, + 542376062B0D9BEA008E6A1C /* ggml-quants.h */, + 542376072B0D9BFB008E6A1C /* ggml-quants.c */, + 549479C82AC9E10B00E0F78B /* ggml-metal.metal */, + 549479C62AC9E0F200E0F78B /* ggml-metal.h */, + 549479C52AC9E0F200E0F78B /* ggml-metal.m */, + 542EA09B2AC8723900A8AEE9 /* ggml.c */, + 542EA09C2AC8723900A8AEE9 /* ggml.h */, + 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */, + 542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */, + 542EA0A12AC8729100A8AEE9 /* llama.cpp */, + 542EA0A22AC8729100A8AEE9 /* llama.h */, + ); + name = llama.cpp; + sourceTree = ""; + }; + 8A1C836A2AC328BD0096AF73 = { + isa = PBXGroup; + children = ( + 8A08D1F62AC7383900FE6CD4 /* llama.cpp */, + 8A907F312AC7134E006146EA /* llama.cpp.swift */, + 8A3F84232AC4C891005E2EE8 /* models */, + 8A1C83752AC328BD0096AF73 /* llama.swiftui */, + 8A1C83742AC328BD0096AF73 /* Products */, + 8A39BE082AC7601000BFEB40 /* Frameworks */, + ); + sourceTree = ""; + }; + 8A1C83742AC328BD0096AF73 /* Products */ = { + isa = PBXGroup; + children = ( + 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */, + ); + name = Products; + sourceTree = ""; + }; + 8A1C83752AC328BD0096AF73 /* llama.swiftui */ = { + isa = PBXGroup; + children = ( + 8A3F84102AC4BD85005E2EE8 /* Resources */, + 8A9F7C4B2AC332DC008AE1EA /* Models */, + 8A9F7C4A2AC332BF008AE1EA /* UI */, + 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */, + 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */, + 8A1C837C2AC328BE0096AF73 /* Preview Content */, + ); + path = llama.swiftui; + sourceTree = ""; + }; + 8A1C837C2AC328BE0096AF73 /* Preview Content */ = { + isa = PBXGroup; + children = ( + 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */, + ); + path = "Preview Content"; + sourceTree = ""; + }; + 8A39BE082AC7601000BFEB40 /* Frameworks */ = { + isa = PBXGroup; + children = ( + 549479CA2AC9E16000E0F78B /* Metal.framework */, + 8A39BE092AC7601000BFEB40 /* Accelerate.framework */, + ); + name = Frameworks; + sourceTree = ""; + }; + 8A3F84102AC4BD85005E2EE8 /* Resources */ = { + isa = PBXGroup; + children = ( + 8A3F84112AC4BD8C005E2EE8 /* models */, + ); + path = Resources; + sourceTree = ""; + }; + 8A3F84112AC4BD8C005E2EE8 /* models */ = { + isa = PBXGroup; + children = ( + ); + path = models; + sourceTree = ""; + }; + 8A907F312AC7134E006146EA /* llama.cpp.swift 
*/ = { + isa = PBXGroup; + children = ( + 8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */, + 8A907F322AC7134E006146EA /* LibLlama.swift */, + ); + path = llama.cpp.swift; + sourceTree = ""; + }; + 8A9F7C4A2AC332BF008AE1EA /* UI */ = { + isa = PBXGroup; + children = ( + 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */, + 8A1C83782AC328BD0096AF73 /* ContentView.swift */, + ); + path = UI; + sourceTree = ""; + }; + 8A9F7C4B2AC332DC008AE1EA /* Models */ = { + isa = PBXGroup; + children = ( + 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */, + ); + path = Models; + sourceTree = ""; + }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ - 8A1C83722AC328BD0096AF73 /* llama.swiftui */ = { - isa = PBXNativeTarget; - buildConfigurationList = 8A1C83812AC328BE0096AF73 /* Build configuration list for PBXNativeTarget "llama.swiftui" */; - buildPhases = ( - 8A1C836F2AC328BD0096AF73 /* Sources */, - 8A1C83702AC328BD0096AF73 /* Frameworks */, - 8A1C83712AC328BD0096AF73 /* Resources */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = llama.swiftui; - packageProductDependencies = ( - ); - productName = llama.swiftui; - productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */; - productType = "com.apple.product-type.application"; - }; + 8A1C83722AC328BD0096AF73 /* llama.swiftui */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8A1C83812AC328BE0096AF73 /* Build configuration list for PBXNativeTarget "llama.swiftui" */; + buildPhases = ( + 8A1C836F2AC328BD0096AF73 /* Sources */, + 8A1C83702AC328BD0096AF73 /* Frameworks */, + 8A1C83712AC328BD0096AF73 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = llama.swiftui; + packageProductDependencies = ( + ); + productName = llama.swiftui; + productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */; + productType = "com.apple.product-type.application"; + }; /* End PBXNativeTarget section */ /* Begin PBXProject section */ - 8A1C836B2AC328BD0096AF73 /* Project object */ = { - isa = PBXProject; - attributes = { - BuildIndependentTargetsInParallel = 1; - LastSwiftUpdateCheck = 1500; - LastUpgradeCheck = 1500; - TargetAttributes = { - 8A1C83722AC328BD0096AF73 = { - CreatedOnToolsVersion = 15.0; - LastSwiftMigration = 1500; - }; - }; - }; - buildConfigurationList = 8A1C836E2AC328BD0096AF73 /* Build configuration list for PBXProject "llama.swiftui" */; - compatibilityVersion = "Xcode 14.0"; - developmentRegion = en; - hasScannedForEncodings = 0; - knownRegions = ( - en, - Base, - ); - mainGroup = 8A1C836A2AC328BD0096AF73; - packageReferences = ( - ); - productRefGroup = 8A1C83742AC328BD0096AF73 /* Products */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 8A1C83722AC328BD0096AF73 /* llama.swiftui */, - ); - }; + 8A1C836B2AC328BD0096AF73 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = 1; + LastSwiftUpdateCheck = 1500; + LastUpgradeCheck = 1500; + TargetAttributes = { + 8A1C83722AC328BD0096AF73 = { + CreatedOnToolsVersion = 15.0; + LastSwiftMigration = 1500; + }; + }; + }; + buildConfigurationList = 8A1C836E2AC328BD0096AF73 /* Build configuration list for PBXProject "llama.swiftui" */; + compatibilityVersion = "Xcode 14.0"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 8A1C836A2AC328BD0096AF73; + packageReferences = ( + ); + productRefGroup = 8A1C83742AC328BD0096AF73 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8A1C83722AC328BD0096AF73 /* 
llama.swiftui */, + ); + }; /* End PBXProject section */ /* Begin PBXResourcesBuildPhase section */ - 8A1C83712AC328BD0096AF73 /* Resources */ = { - isa = PBXResourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */, - 8A3F84242AC4C891005E2EE8 /* models in Resources */, - 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */, - 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; + 8A1C83712AC328BD0096AF73 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */, + 8A3F84242AC4C891005E2EE8 /* models in Resources */, + 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */, + 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXResourcesBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ - 8A1C836F2AC328BD0096AF73 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */, - 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */, - 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */, - 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */, - 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */, - 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */, - 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */, - 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */, - 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */, - 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; + 8A1C836F2AC328BD0096AF73 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */, + 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */, + 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */, + 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */, + 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */, + 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */, + 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */, + 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */, + 7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */, + 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */, + 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXSourcesBuildPhase section */ /* Begin XCBuildConfiguration section */ - 8A1C837F2AC328BE0096AF73 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - 
CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = dwarf; - ENABLE_STRICT_OBJC_MSGSEND = YES; - ENABLE_TESTABILITY = YES; - ENABLE_USER_SCRIPT_SANDBOXING = YES; - GCC_C_LANGUAGE_STANDARD = gnu17; - GCC_DYNAMIC_NO_PIC = NO; - GCC_NO_COMMON_BLOCKS = YES; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 17.0; - LOCALIZATION_PREFERS_STRING_CATALOGS = YES; - MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; - MTL_FAST_MATH = YES; - ONLY_ACTIVE_ARCH = YES; - SDKROOT = iphoneos; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; - SWIFT_OPTIMIZATION_LEVEL = "-Onone"; - }; - name = Debug; - }; - 8A1C83802AC328BE0096AF73 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - ENABLE_NS_ASSERTIONS = NO; - ENABLE_STRICT_OBJC_MSGSEND = YES; - ENABLE_USER_SCRIPT_SANDBOXING = YES; - GCC_C_LANGUAGE_STANDARD = gnu17; - GCC_NO_COMMON_BLOCKS = YES; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 17.0; - LOCALIZATION_PREFERS_STRING_CATALOGS = YES; - MTL_ENABLE_DEBUG_INFO = NO; - MTL_FAST_MATH = YES; - SDKROOT = iphoneos; - SWIFT_COMPILATION_MODE = wholemodule; - VALIDATE_PRODUCT = YES; - }; - name = Release; - }; - 8A1C83822AC328BE0096AF73 /* Debug */ = 
{ - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CLANG_ENABLE_MODULES = YES; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; - DEVELOPMENT_TEAM = STLSG3FG8Q; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - IPHONEOS_DEPLOYMENT_TARGET = 16.0; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; - SWIFT_OPTIMIZATION_LEVEL = "-Onone"; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Debug; - }; - 8A1C83832AC328BE0096AF73 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CLANG_ENABLE_MODULES = YES; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; - DEVELOPMENT_TEAM = STLSG3FG8Q; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - IPHONEOS_DEPLOYMENT_TARGET = 16.0; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Release; - }; + 8A1C837F2AC328BE0096AF73 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + 
CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 17.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 8A1C83802AC328BE0096AF73 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 17.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + 
SDKROOT = iphoneos; + SWIFT_COMPILATION_MODE = wholemodule; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 8A1C83822AC328BE0096AF73 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; + DEVELOPMENT_TEAM = STLSG3FG8Q; + ENABLE_PREVIEWS = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 8A1C83832AC328BE0096AF73 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; + DEVELOPMENT_TEAM = STLSG3FG8Q; + ENABLE_PREVIEWS = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ - 8A1C836E2AC328BD0096AF73 /* Build configuration list for PBXProject "llama.swiftui" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 8A1C837F2AC328BE0096AF73 /* Debug */, - 8A1C83802AC328BE0096AF73 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 8A1C83812AC328BE0096AF73 /* Build configuration list for PBXNativeTarget "llama.swiftui" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 
8A1C83822AC328BE0096AF73 /* Debug */, - 8A1C83832AC328BE0096AF73 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; + 8A1C836E2AC328BD0096AF73 /* Build configuration list for PBXProject "llama.swiftui" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8A1C837F2AC328BE0096AF73 /* Debug */, + 8A1C83802AC328BE0096AF73 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8A1C83812AC328BE0096AF73 /* Build configuration list for PBXNativeTarget "llama.swiftui" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8A1C83822AC328BE0096AF73 /* Debug */, + 8A1C83832AC328BE0096AF73 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; /* End XCConfigurationList section */ - }; - rootObject = 8A1C836B2AC328BD0096AF73 /* Project object */; + }; + rootObject = 8A1C836B2AC328BD0096AF73 /* Project object */; } diff --git a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift index babc60cdc..3393eb242 100644 --- a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift +++ b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift @@ -3,24 +3,26 @@ import Foundation @MainActor class LlamaState: ObservableObject { @Published var messageLog = "" + @Published var cacheCleared = false private var llamaContext: LlamaContext? - private var modelUrl: URL? { - Bundle.main.url(forResource: "q8_0", withExtension: "gguf", subdirectory: "models") + private var defaultModelUrl: URL? { + Bundle.main.url(forResource: "ggml-model", withExtension: "gguf", subdirectory: "models") // Bundle.main.url(forResource: "llama-2-7b-chat", withExtension: "Q2_K.gguf", subdirectory: "models") } + init() { do { - try loadModel() + try loadModel(modelUrl: defaultModelUrl) } catch { messageLog += "Error!\n" } } - private func loadModel() throws { + func loadModel(modelUrl: URL?) 
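// [editor's note] a minimal usage sketch for the reworked loader whose signature is
// introduced just above; the file name is hypothetical, error handling is elided, and
// the call is assumed to run in a MainActor context (LlamaState is @MainActor).
// Passing nil falls through to the "Could not locate model" branch:
//
//     let state = LlamaState()
//     let docs = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
//     try state.loadModel(modelUrl: docs.appendingPathComponent("model.gguf"))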
throws { messageLog += "Loading model...\n" if let modelUrl { - llamaContext = try LlamaContext.createContext(path: modelUrl.path()) + llamaContext = try LlamaContext.create_context(path: modelUrl.path()) messageLog += "Loaded model \(modelUrl.lastPathComponent)\n" } else { messageLog += "Could not locate model\n" @@ -31,7 +33,7 @@ class LlamaState: ObservableObject { guard let llamaContext else { return } - messageLog += "Attempting to complete text...\n" + await llamaContext.completion_init(text: text) messageLog += "\(text)" @@ -42,4 +44,42 @@ class LlamaState: ObservableObject { await llamaContext.clear() messageLog += "\n\ndone\n" } + + func bench() async { + guard let llamaContext else { + return + } + + messageLog += "\n" + messageLog += "Running benchmark...\n" + messageLog += "Model info: " + messageLog += await llamaContext.model_info() + "\n" + + let t_start = DispatchTime.now().uptimeNanoseconds + await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up + let t_end = DispatchTime.now().uptimeNanoseconds + + let t_heat = Double(t_end - t_start) / 1_000_000_000.0 + messageLog += "Heat up time: \(t_heat) seconds, please wait...\n" + + // if more than 5 seconds, then we're probably running on a slow device + if t_heat > 5.0 { + messageLog += "Heat up time is too long, aborting benchmark\n" + return + } + + let result = await llamaContext.bench(pp: 512, tg: 128, pl: 1, nr: 3) + + messageLog += "\(result)" + messageLog += "\n" + } + + func clear() async { + guard let llamaContext else { + return + } + + await llamaContext.clear() + messageLog = "" + } } diff --git a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift index 0bd16a806..219bf4dc1 100644 --- a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift +++ b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift @@ -5,24 +5,97 @@ struct ContentView: View { @State private var multiLineText = "" + private static func cleanupModelCaches() { + // Delete all models (*.gguf) + let fileManager = FileManager.default + let documentsUrl = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] + do { + let fileURLs = try fileManager.contentsOfDirectory(at: documentsUrl, includingPropertiesForKeys: nil) + for fileURL in fileURLs { + if fileURL.pathExtension == "gguf" { + try fileManager.removeItem(at: fileURL) + } + } + } catch { + print("Error while enumerating files \(documentsUrl.path): \(error.localizedDescription)") + } + } + var body: some View { VStack { - ScrollView(.vertical) { + ScrollView(.vertical, showsIndicators: true) { Text(llamaState.messageLog) + .font(.system(size: 12)) + .frame(maxWidth: .infinity, alignment: .leading) + .padding() + .onTapGesture { + UIApplication.shared.sendAction(#selector(UIResponder.resignFirstResponder), to: nil, from: nil, for: nil) + } } TextEditor(text: $multiLineText) - .frame(height: 200) + .frame(height: 80) .padding() .border(Color.gray, width: 0.5) - Button(action: { - sendText() - }) { - Text("Send") - .padding() - .background(Color.blue) - .foregroundColor(.white) - .cornerRadius(8) + + HStack { + Button("Send") { + sendText() + } + .padding(8) + .background(Color.blue) + .foregroundColor(.white) + .cornerRadius(8) + + Button("Bench") { + bench() + } + .padding(8) + .background(Color.blue) + .foregroundColor(.white) + .cornerRadius(8) + + Button("Clear") { + clear() + } + .padding(8) + .background(Color.blue) + .foregroundColor(.white) + .cornerRadius(8) + + Button("Copy") { + 
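// [editor's note] the bench() method above warms up with a tiny pp 8 / tg 4 run and
// aborts if that alone takes over 5 seconds; a device that slow would need minutes
// for the full pp 512 / tg 128 pass. Its timing pattern in isolation (a sketch,
// workload elided):
//
//     let t0 = DispatchTime.now().uptimeNanoseconds
//     // ... run the workload ...
//     let seconds = Double(DispatchTime.now().uptimeNanoseconds - t0) / 1_000_000_000.0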
UIPasteboard.general.string = llamaState.messageLog + } + .padding(8) + .background(Color.blue) + .foregroundColor(.white) + .cornerRadius(8) + } + + VStack { + DownloadButton( + llamaState: llamaState, + modelName: "TinyLlama-1.1B (Q4_0)", + modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true", + filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf" + ) + .font(.system(size: 12)) + .padding(.top, 4) + + DownloadButton( + llamaState: llamaState, + modelName: "TinyLlama-1.1B (Q8_0)", + modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q8_0.gguf?download=true", + filename: "tinyllama-1.1b-1t-openorca.Q8_0.gguf" + ) + .font(.system(size: 12)) + + Button("Clear downloaded models") { + ContentView.cleanupModelCaches() + llamaState.cacheCleared = true + } + .padding(8) + .font(.system(size: 12)) } } .padding() @@ -34,9 +107,20 @@ struct ContentView: View { multiLineText = "" } } + + func bench() { + Task { + await llamaState.bench() + } + } + + func clear() { + Task { + await llamaState.clear() + } + } } -/* -#Preview { - ContentView() -} -*/ + +//#Preview { +// ContentView() +//} diff --git a/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift b/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift new file mode 100644 index 000000000..4bd75cb69 --- /dev/null +++ b/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift @@ -0,0 +1,122 @@ +import SwiftUI + +struct DownloadButton: View { + @ObservedObject private var llamaState: LlamaState + private var modelName: String + private var modelUrl: String + private var filename: String + + @State private var status: String + + @State private var downloadTask: URLSessionDownloadTask? + @State private var progress = 0.0 + @State private var observation: NSKeyValueObservation? + + private static func getFileURL(filename: String) -> URL { + FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent(filename) + } + + private func checkFileExistenceAndUpdateStatus() { + } + + init(llamaState: LlamaState, modelName: String, modelUrl: String, filename: String) { + self.llamaState = llamaState + self.modelName = modelName + self.modelUrl = modelUrl + self.filename = filename + + let fileURL = DownloadButton.getFileURL(filename: filename) + status = FileManager.default.fileExists(atPath: fileURL.path) ? "downloaded" : "download" + } + + private func download() { + status = "downloading" + print("Downloading model \(modelName) from \(modelUrl)") + guard let url = URL(string: modelUrl) else { return } + let fileURL = DownloadButton.getFileURL(filename: filename) + + downloadTask = URLSession.shared.downloadTask(with: url) { temporaryURL, response, error in + if let error = error { + print("Error: \(error.localizedDescription)") + return + } + + guard let response = response as? 
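// [editor's note] two details of the completion-handler pattern used in download():
// URLSession only guarantees the downloaded temporary file until the handler returns,
// so the copyItem(at:to:) below has to happen inside the handler rather than be
// deferred; and progress comes from KVO on the task's progress object, whose callback
// may arrive off the main thread, e.g.:
//
//     observation = task.progress.observe(\.fractionCompleted) { progress, _ in
//         print("downloaded \(Int(progress.fractionCompleted * 100))%")
//     }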
HTTPURLResponse, (200...299).contains(response.statusCode) else { + print("Server error!") + return + } + + do { + if let temporaryURL = temporaryURL { + try FileManager.default.copyItem(at: temporaryURL, to: fileURL) + print("Writing to \(filename) completed") + + llamaState.cacheCleared = false + + status = "downloaded" + } + } catch let err { + print("Error: \(err.localizedDescription)") + } + } + + observation = downloadTask?.progress.observe(\.fractionCompleted) { progress, _ in + self.progress = progress.fractionCompleted + } + + downloadTask?.resume() + } + + var body: some View { + VStack { + if status == "download" { + Button(action: download) { + Text("Download " + modelName) + } + } else if status == "downloading" { + Button(action: { + downloadTask?.cancel() + status = "download" + }) { + Text("\(modelName) (Downloading \(Int(progress * 100))%)") + } + } else if status == "downloaded" { + Button(action: { + let fileURL = DownloadButton.getFileURL(filename: filename) + if !FileManager.default.fileExists(atPath: fileURL.path) { + download() + return + } + do { + try llamaState.loadModel(modelUrl: fileURL) + } catch let err { + print("Error: \(err.localizedDescription)") + } + }) { + Text("\(modelName) (Downloaded)") + } + } else { + Text("Unknown status") + } + } + .onDisappear() { + downloadTask?.cancel() + } + .onChange(of: llamaState.cacheCleared) { newValue in + if newValue { + downloadTask?.cancel() + let fileURL = DownloadButton.getFileURL(filename: filename) + status = FileManager.default.fileExists(atPath: fileURL.path) ? "downloaded" : "download" + } + } + } +} + +// #Preview { +// DownloadButton( +// llamaState: LlamaState(), +// modelName: "TheBloke / TinyLlama-1.1B-1T-OpenOrca-GGUF (Q4_0)", +// modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true", +// filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf" +// ) +// } diff --git a/llama.cpp b/llama.cpp index f49214c13..fd9fd6ed9 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2397,25 +2397,25 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { switch (ftype) { case LLAMA_FTYPE_ALL_F32: return "all F32"; - case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; - case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; - case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; + case LLAMA_FTYPE_MOSTLY_F16: return "F16"; + case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; + case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: - return "mostly Q4_1, some F16"; - case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; - case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; - case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; + return "Q4_1, some F16"; + case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; + case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; + case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; // K-quants - case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; - case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large"; - case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K"; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: 
return "Q3_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; default: return "unknown, may not work"; } @@ -2533,6 +2533,7 @@ static void llm_load_hparams( ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { + case 22: model.type = e_model::MODEL_1B; break; case 26: model.type = e_model::MODEL_3B; break; case 32: model.type = e_model::MODEL_7B; break; case 40: model.type = e_model::MODEL_13B; break; From b1306c439490c7fa4ec33594500d980d1e9e15e6 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 17 Dec 2023 20:16:23 +0200 Subject: [PATCH 10/84] readme : update hot topics --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index edbe6ba57..01aef2afc 100644 --- a/README.md +++ b/README.md @@ -10,11 +10,11 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ ### Hot topics +- Collecting Apple Silicon performance stats: + - M-series: https://github.com/ggerganov/llama.cpp/discussions/4167 + - A-series: https://github.com/ggerganov/llama.cpp/discussions/4508 - Added Mixtral support: https://github.com/ggerganov/llama.cpp/pull/4406 -- **llama.h API change for handling KV cache offloading and data type: https://github.com/ggerganov/llama.cpp/pull/4309** -- Using `llama.cpp` with AWS instances: https://github.com/ggerganov/llama.cpp/discussions/4225 - Looking for contributions to improve and maintain the `server` example: https://github.com/ggerganov/llama.cpp/issues/4216 -- Collecting Apple Silicon performance stats: https://github.com/ggerganov/llama.cpp/discussions/4167 ---- From 2994f0c5a2e8c96955b422dedc93ec2595d16b82 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Sun, 17 Dec 2023 19:39:02 -0500 Subject: [PATCH 11/84] decode : fix logits_valid for legacy API (#4516) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index fd9fd6ed9..d6d575f9e 100644 --- a/llama.cpp +++ b/llama.cpp @@ -6184,7 +6184,7 @@ static int llama_decode_internal( logits_out.resize(n_vocab); memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(n_tokens - 1)), sizeof(float)*n_vocab); #ifndef NDEBUG - logits_valid[n_tokens - 1] = true; + logits_valid[0] = true; #endif } } From 3c04bf6da89eaf4c7d317e0518f0687dfcbf2de7 Mon Sep 17 00:00:00 2001 From: hankcs Date: Mon, 18 Dec 2023 05:14:58 -0800 Subject: [PATCH 12/84] llama : fix try_override for bool_value which always return true (#4519) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index d6d575f9e..99facbf77 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1937,7 +1937,7 @@ namespace GGUFMeta { target = override->bool_value; return true; } - return true; + return false; } template From b9e74f9bca5fdf7d0a22ed25e7a9626335fdfa48 Mon Sep 17 00:00:00 2001 From: Ebey Abraham Date: Mon, 18 Dec 2023 17:27:47 +0000 Subject: [PATCH 13/84] llama : add phi-2 + fix NeoX rope + ggml_mul_mat_set_prec (#4490) * phi2 implementation * fix breaking change * phi-2 : various fixes * phi-2 : use layer norm eps * py : whitespaces * llama : fix meta KV override bug * convert : phi don't add BOS 
token * convert : revert "added_tokens_decoder" change * phi-2 : scale Q instead of KQ for better precision * ggml : fix NeoX rope to rotate just first n_dims * cuda : less diff in the rope_neox kernel * ggml : add ggml_mul_mat_set_prec ggml-ci * Update ggml-cuda.cu Co-authored-by: slaren * Update ggml-cuda.cu Co-authored-by: slaren * cuda : ggml_cuda_op_mul_mat_cublas support F32 precision * cuda : remove oboslete comment --------- Co-authored-by: Ebey Abraham Co-authored-by: Georgi Gerganov Co-authored-by: slaren --- convert-hf-to-gguf.py | 22 +++ ggml-cuda.cu | 117 +++++++++---- ggml-metal.metal | 13 +- ggml.c | 46 ++++- ggml.h | 12 ++ gguf-py/gguf/constants.py | 13 ++ gguf-py/gguf/tensor_mapping.py | 8 + llama.cpp | 307 +++++++++++++++++++++++++++++---- tests/test-backend-ops.cpp | 1 + 9 files changed, 463 insertions(+), 76 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index e46a7813a..e71a96c48 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -182,6 +182,8 @@ class Model: return QwenModel if model_architecture == "MixtralForCausalLM": return MixtralModel + if model_architecture == "PhiForCausalLM": + return Phi2Model return Model def _is_model_safetensors(self) -> bool: @@ -221,6 +223,8 @@ class Model: return gguf.MODEL_ARCH.QWEN if arch == "MixtralForCausalLM": return gguf.MODEL_ARCH.LLAMA + if arch == "PhiForCausalLM": + return gguf.MODEL_ARCH.PHI2 raise NotImplementedError(f'Architecture "{arch}" not supported!') @@ -980,6 +984,24 @@ class QwenModel(Model): print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") self.gguf_writer.add_tensor(new_name, data) + +class Phi2Model(Model): + def set_gguf_parameters(self): + block_count = self.hparams["n_layer"] + + self.gguf_writer.add_name("Phi2") + self.gguf_writer.add_context_length(self.hparams["n_positions"]) + self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(self.hparams["n_head"]) + self.gguf_writer.add_head_count_kv(self.hparams["n_head"]) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_rope_dimension_count(self.hparams["rotary_dim"]) + self.gguf_writer.add_file_type(self.ftype) + self.gguf_writer.add_add_bos_token(False) + + ###### CONVERSION LOGIC ###### diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 0a63c1ecf..d0f3d8034 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -4998,7 +4998,16 @@ static __global__ void rope_neox( const int ib = col / n_dims; const int ic = col % n_dims; - const int i = row*ncols + ib*n_dims + ic/2; + if (ib > 0) { + const int i = row*ncols + ib*n_dims + ic; + + dst[i + 0] = x[i + 0]; + dst[i + 1] = x[i + 1]; + + return; + } + + const int i = row*ncols + ib*n_dims + ic/2; const int i2 = row/p_delta_rows; float cur_rot = inv_ndims * ic - ib; @@ -7057,6 +7066,7 @@ inline void ggml_cuda_op_upscale( (void) src1; (void) dst; + (void) src1_dd; } inline void ggml_cuda_op_pad( @@ -7073,6 +7083,7 @@ inline void ggml_cuda_op_pad( (void) src1; (void) dst; + (void) src1_dd; } inline void ggml_cuda_op_rms_norm( @@ -7376,7 +7387,7 @@ inline void ggml_cuda_op_mul_mat_cublas( const int compute_capability = g_compute_capabilities[id]; - if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { + if (compute_capability >= CC_VOLTA && (src0->type == 
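/* [editor's note] reference semantics of the rope_neox fix in the hunk above, written
   as a host-side C sketch; theta handling is simplified to the default geometric
   progression and the kernel's freq/ext-factor machinery is omitted. Only the first
   n_dims entries of a row are rotated; the tail is now copied through unchanged,
   where the old code effectively re-rotated it at a bogus offset. */
#include <math.h>
static void rope_neox_row_ref(float * dst, const float * x,
                              int ncols, int n_dims, float theta_base, float theta_scale) {
    float theta = theta_base;
    for (int ic = 0; ic < n_dims; ic += 2) {
        const float x0 = x[ic/2];               // first half of the rotation pair
        const float x1 = x[ic/2 + n_dims/2];    // second half, n_dims/2 apart (NeoX layout)
        dst[ic/2]            = x0*cosf(theta) - x1*sinf(theta);
        dst[ic/2 + n_dims/2] = x0*sinf(theta) + x1*cosf(theta);
        theta *= theta_scale;                   // one frequency step per pair
    }
    for (int ic = n_dims; ic < ncols; ++ic) {
        dst[ic] = x[ic];                        // pass-through of the non-rotated dims
    }
}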
GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 half * src0_as_f16 = nullptr; size_t src0_as = 0; @@ -8300,27 +8311,27 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor } static __global__ void k_compute_batched_ptrs( - const half * src0_as_f16, const half * src1_as_f16, half * dst_f16, + const half * src0_as_f16, const half * src1_as_f16, char * dst, const void ** ptrs_src, void ** ptrs_dst, - int ne12, int ne13, - int ne23, - int nb02, int nb03, - int nb12, int nb13, - int nb2, int nb3, - int r2, int r3) { - int i13 = blockIdx.x * blockDim.x + threadIdx.x; - int i12 = blockIdx.y * blockDim.y + threadIdx.y; + int64_t ne12, int64_t ne13, + int64_t ne23, + size_t nb02, size_t nb03, + size_t nb12, size_t nb13, + size_t nbd2, size_t nbd3, + int64_t r2, int64_t r3) { + int64_t i13 = blockIdx.x * blockDim.x + threadIdx.x; + int64_t i12 = blockIdx.y * blockDim.y + threadIdx.y; if (i13 >= ne13 || i12 >= ne12) { return; } - int i03 = i13 / r3; - int i02 = i12 / r2; + int64_t i03 = i13 / r3; + int64_t i02 = i12 / r2; ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2; - ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst_f16 + i12* nb2/2 + i13* nb3/2; + ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; } static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -8376,7 +8387,41 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); + + half * dst_f16 = nullptr; + char * dst_t = nullptr; + + cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; + cudaDataType_t cu_data_type = CUDA_R_16F; + + // dst strides + size_t nbd2 = dst->nb[2]; + size_t nbd3 = dst->nb[3]; + + const half alpha_f16 = 1.0f; + const half beta_f16 = 0.0f; + + const float alpha_f32 = 1.0f; + const float beta_f32 = 0.0f; + + const void * alpha = &alpha_f16; + const void * beta = &beta_f16; + + if (dst->op_params[0] == GGML_PREC_DEFAULT) { + dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); + dst_t = (char *) dst_f16; + + nbd2 /= sizeof(float) / sizeof(half); + nbd3 /= sizeof(float) / sizeof(half); + } else { + dst_t = (char *) dst_ddf; + + cu_compute_type = CUBLAS_COMPUTE_32F; + cu_data_type = CUDA_R_32F; + + alpha = &alpha_f32; + beta = &beta_f32; + } GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); @@ -8385,9 +8430,6 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; - const half alpha_f16 = 1.0f; - const half beta_f16 = 0.0f; - #if 0 // use cublasGemmEx { @@ -8397,12 +8439,12 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const int i02 = i12 / r2; CUBLAS_CHECK( - cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, + cublasGemmEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - &alpha_f16, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half), - (const char *) src1_as_f16 + i12*src1->nb[2]/2 + 
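// [editor's note] the dst->op_params[0] checks in this hunk key off the precision
// hint added to ggml.h later in this patch; callers opt in per matmul, as llama.cpp
// does for the phi-2 KQ product:
//
//     struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
//     ggml_mul_mat_set_prec(kq, GGML_PREC_F32);  // accumulate this matmul in F32
//
// With GGML_PREC_F32 the batched path writes straight into the F32 dst using
// CUBLAS_COMPUTE_32F, skipping the fp16 staging buffer and the final fp16 -> fp32
// conversion that the default path still performs.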
i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float), - &beta_f16, ( char *) dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2, CUDA_R_16F, ne01, - CUBLAS_COMPUTE_16F, + alpha, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half), + (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float), + beta, ( char *) dst_t + i12*nbd2 + i13*nbd3, cu_data_type, ne01, + cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } } @@ -8414,11 +8456,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - &alpha_f16, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA - (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB - &beta_f16, ( char *) dst_f16, CUDA_R_16F, ne01, dst->nb[2]/sizeof(float), // strideC + alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA + (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB + beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC ne12*ne13, - CUBLAS_COMPUTE_16F, + cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { // use cublasGemmBatchedEx @@ -8435,24 +8477,24 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_as_f16, src1_as_f16, dst_f16, + src0_as_f16, src1_as_f16, dst_t, ptrs_src, ptrs_dst, ne12, ne13, ne23, nb02, nb03, nb12, nb13, - dst->nb[2], dst->nb[3], + nbd2, nbd3, r2, r3); CUDA_CHECK(cudaGetLastError()); CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float), - &beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01, + alpha, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half), + (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + beta, ( void **) (ptrs_dst + 0*ne23), cu_data_type, ne01, ne23, - CUBLAS_COMPUTE_16F, + cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (ptrs_src_s != 0) { @@ -8464,11 +8506,14 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const } #endif - const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); + if (dst->op_params[0] == GGML_PREC_DEFAULT) { + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); + to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); + + ggml_cuda_pool_free(dst_f16, dst_as); + } ggml_cuda_pool_free(src1_as_f16, src1_as); - ggml_cuda_pool_free(dst_f16, dst_as); } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { diff --git a/ggml-metal.metal b/ggml-metal.metal index fe0ada445..d5b54e112 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -1702,8 +1702,9 @@ kernel void kernel_rope( dst_data[1] = x0*sin_theta + x1*cos_theta; } } else { - for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { - for (int64_t ic = 2*tiitg; ic < n_dims; ic += 2*tptg.x) { + for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) { + if (ic < n_dims) { + const int64_t ib = 
0; // simplified from `(ib * n_dims + ic) * inv_ndims` const float cur_rot = inv_ndims*ic - ib; @@ -1722,6 +1723,14 @@ kernel void kernel_rope( dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; + } else { + const int64_t i0 = ic; + + device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; } } } diff --git a/ggml.c b/ggml.c index ad546a731..6da65bd92 100644 --- a/ggml.c +++ b/ggml.c @@ -4098,6 +4098,14 @@ struct ggml_tensor * ggml_mul_mat( return result; } +void ggml_mul_mat_set_prec( + struct ggml_tensor * a, + enum ggml_prec prec) { + const int32_t prec_i32 = (int32_t) prec; + + ggml_set_op_params_i32(a, 0, prec_i32); +} + // ggml_mul_mat_id struct ggml_tensor * ggml_mul_mat_id( @@ -9168,6 +9176,8 @@ static void ggml_compute_forward_norm_f32( float eps; memcpy(&eps, dst->op_params, sizeof(float)); + GGML_ASSERT(eps > 0.0f); + // TODO: optimize for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { @@ -9237,6 +9247,8 @@ static void ggml_compute_forward_rms_norm_f32( float eps; memcpy(&eps, dst->op_params, sizeof(float)); + GGML_ASSERT(eps > 0.0f); + // TODO: optimize for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { @@ -11562,10 +11574,13 @@ static void ggml_compute_forward_rope_f32( } } else { // TODO: this might be wrong for ne0 != n_dims - need double check - // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28 + // it seems we have to rope just the first n_dims elements and do nothing with the rest + // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26 theta_base *= freq_scale; - for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { - for (int64_t ic = 0; ic < n_dims; ic += 2) { + for (int64_t ic = 0; ic < ne0; ic += 2) { + if (ic < n_dims) { + const int64_t ib = 0; + // simplified from `(ib * n_dims + ic) * inv_ndims` float cur_rot = inv_ndims * ic - ib; @@ -11588,6 +11603,14 @@ static void ggml_compute_forward_rope_f32( dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; + } else { + const int64_t i0 = ic; + + const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; } } } @@ -11715,10 +11738,13 @@ static void ggml_compute_forward_rope_f16( } } else { // TODO: this might be wrong for ne0 != n_dims - need double check - // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28 + // it seems we have to rope just the first n_dims elements and do nothing with the rest + // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26 theta_base *= freq_scale; - for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { - for (int64_t ic = 0; ic < n_dims; ic += 2) { + for (int64_t ic = 0; ic < ne0; ic += 2) { + if (ic < n_dims) { + const int64_t ib = 0; + // simplified from `(ib * n_dims + ic) * inv_ndims` float cur_rot = inv_ndims * ic - ib; @@ -11741,6 +11767,14 @@ static void 
ggml_compute_forward_rope_f16( dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + } else { + const int64_t i0 = ic; + + const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; } } } diff --git a/ggml.h b/ggml.h index 68f7833b6..f1003984f 100644 --- a/ggml.h +++ b/ggml.h @@ -343,6 +343,12 @@ extern "C" { GGML_TYPE_COUNT, }; + // precision + enum ggml_prec { + GGML_PREC_DEFAULT, + GGML_PREC_F32, + }; + enum ggml_backend_type { GGML_BACKEND_CPU = 0, GGML_BACKEND_GPU = 10, @@ -1057,6 +1063,12 @@ extern "C" { struct ggml_tensor * a, struct ggml_tensor * b); + // change the precision of a matrix multiplication + // set to GGML_PREC_F32 for higher precision (useful for phi-2) + GGML_API void ggml_mul_mat_set_prec( + struct ggml_tensor * a, + enum ggml_prec prec); + // indirect matrix multiplication // ggml_mul_mat_id(ctx, as, ids, id, b) ~= ggml_mul_mat(as[ids[id]], b) GGML_API struct ggml_tensor * ggml_mul_mat_id( diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 12133882b..390dca049 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -95,6 +95,7 @@ class MODEL_ARCH(IntEnum): BLOOM = auto() STABLELM = auto() QWEN = auto() + PHI2 = auto() class MODEL_TENSOR(IntEnum): @@ -140,6 +141,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.BLOOM: "bloom", MODEL_ARCH.STABLELM: "stablelm", MODEL_ARCH.QWEN: "qwen", + MODEL_ARCH.PHI2: "phi2", } TENSOR_NAMES: dict[MODEL_TENSOR, str] = { @@ -350,6 +352,17 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_ARCH.GPT2: [ # TODO ], + MODEL_ARCH.PHI2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ] # TODO } diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 0115ea1c6..6fcbdbc1c 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -17,6 +17,7 @@ class TensorNameMap: "tok_embeddings", # llama-pth "embeddings.word_embeddings", # bert "language_model.embedding.word_embeddings", # persimmon + "transformer.embd.wte", # phi2 ), # Token type embeddings @@ -41,6 +42,7 @@ class TensorNameMap: "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen "output", # llama-pth bloom "word_embeddings_for_head", # persimmon + "lm_head.linear", # phi2 ), # Output norm @@ -53,6 +55,7 @@ class TensorNameMap: "transformer.norm_f", # mpt "ln_f", # refact bloom qwen "language_model.encoder.final_layernorm", # persimmon + "lm_head.ln", # phi2 ), # Rope frequencies @@ -75,6 +78,7 @@ class TensorNameMap: "encoder.layer.{bid}.attention.output.LayerNorm", # bert "language_model.encoder.layers.{bid}.input_layernorm", # persimmon "model.layers.{bid}.ln1", # yi + "transformer.h.{bid}.ln", # phi2 ), # Attention norm 2 @@ -90,6 +94,7 @@ class TensorNameMap: "transformer.h.{bid}.self_attention.query_key_value", # falcon "h.{bid}.self_attention.query_key_value", # bloom "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon + "transformer.h.{bid}.mixer.Wqkv", # phi2 ), # Attention query @@ -128,6 +133,7 @@ class TensorNameMap: "encoder.layer.{bid}.attention.output.dense", # bert 
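# [editor's note] each MODEL_TENSOR entry in this map is a tuple of candidate source
# names; the "{bid}" placeholder is filled with the block index when TensorNameMap is
# built, so the phi2 entries added in this file cover every layer at once, e.g.
# "transformer.h.7.mixer.out_proj" resolves to the GGUF tensor "blk.7.attn_output".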
"transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon + "transformer.h.{bid}.mixer.out_proj", # phi2 ), # Rotary embeddings @@ -167,6 +173,7 @@ class TensorNameMap: "transformer.h.{bid}.mlp.fc_in", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon "transformer.h.{bid}.mlp.w1", # qwen + "transformer.h.{bid}.mlp.fc1", # phi2 ), MODEL_TENSOR.FFN_UP_EXP: ( @@ -198,6 +205,7 @@ class TensorNameMap: "encoder.layer.{bid}.output.dense", # bert "transformer.h.{bid}.mlp.fc_out", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "transformer.h.{bid}.mlp.fc2", # phi2 ), MODEL_TENSOR.FFN_DOWN_EXP: ( diff --git a/llama.cpp b/llama.cpp index 99facbf77..edd2910b3 100644 --- a/llama.cpp +++ b/llama.cpp @@ -195,6 +195,7 @@ enum llm_arch { LLM_ARCH_BLOOM, LLM_ARCH_STABLELM, LLM_ARCH_QWEN, + LLM_ARCH_PHI2, LLM_ARCH_UNKNOWN, }; @@ -212,6 +213,7 @@ static std::map LLM_ARCH_NAMES = { { LLM_ARCH_BLOOM, "bloom" }, { LLM_ARCH_STABLELM, "stablelm" }, { LLM_ARCH_QWEN, "qwen" }, + { LLM_ARCH_PHI2, "phi2" }, }; enum llm_kv { @@ -550,6 +552,19 @@ static std::map> LLM_TENSOR_NAMES = { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PHI2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_UNKNOWN, @@ -1420,6 +1435,7 @@ struct llama_model { struct ggml_tensor * output_norm; struct ggml_tensor * output_norm_b; struct ggml_tensor * output; + struct ggml_tensor * output_b; std::vector layers; @@ -2635,6 +2651,15 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_PHI2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; default: (void)0; } @@ -2987,7 +3012,7 @@ static void llm_load_tensors( (void) main_gpu; - enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU; + enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU; enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU; #ifdef GGML_USE_CUBLAS @@ -3630,7 +3655,73 @@ static void llm_load_tensors( } } } break; + case LLM_ARCH_PHI2: + { + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + // output + { + ggml_backend_type backend_norm; + ggml_backend_type backend_output; + + if (n_gpu_layers > int(n_layer)) { + backend_norm = llama_backend_offload; + backend_output = llama_backend_offload; + } else { + backend_norm = GGML_BACKEND_CPU; + backend_output = GGML_BACKEND_CPU; + } + + model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); + model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); + model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output); + + if (backend_norm == GGML_BACKEND_GPU) { + vram_weights += ggml_nbytes(model.output_norm); + vram_weights += 
ggml_nbytes(model.output_norm_b); + vram_weights += ggml_nbytes(model.output); + vram_weights += ggml_nbytes(model.output_b); + } + } + + const uint32_t n_ff = hparams.n_ff; + + const int i_gpu_start = n_layer - n_gpu_layers; + + model.layers.resize(n_layer); + + for (uint32_t i = 0; i < n_layer; ++i) { + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + + auto & layer = model.layers[i]; + + layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + + layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); + layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + + layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); + layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + + if (backend == GGML_BACKEND_GPU) { + vram_weights += + ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + + ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + + ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + + ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) + + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b); + } + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -3991,6 +4082,7 @@ static struct ggml_tensor * llm_build_ffn( // if max_alibi_bias > 0 then apply ALiBi static struct ggml_tensor * llm_build_kqv( struct ggml_context * ctx, + const llama_model & model, const llama_hparams & hparams, const llama_kv_cache & kv, struct ggml_tensor * wo, @@ -4002,6 +4094,7 @@ static struct ggml_tensor * llm_build_kqv( int32_t n_tokens, int32_t n_kv, float max_alibi_bias, + float scale, const llm_build_cb & cb, int il) { const int64_t n_embd = hparams.n_embd; @@ -4024,6 +4117,12 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); cb(kq, "kq", il); + if (model.arch == LLM_ARCH_PHI2) { + // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs + // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847 + ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + } + if (max_alibi_bias > 0.0f) { // temporary branch until we figure out how to handle ggml_alibi through ggml_add kq = ggml_scale(ctx, kq, kq_scale); @@ -4043,7 +4142,7 @@ static struct ggml_tensor * llm_build_kqv( kq = ggml_soft_max(ctx, kq); cb(kq, "kq_soft_max", il); } else { - kq = ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf(float(n_embd_head))); + kq = ggml_soft_max_ext(ctx, kq, kq_mask, scale); cb(kq, "kq_soft_max_ext", il); } @@ -4250,9 +4349,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = 
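// [editor's note] llm_build_kqv now takes the softmax scale explicitly instead of
// hard-coding it: the attention computes softmax(scale * KQ + mask) * V, where
// KQ = ggml_mul_mat(k, q). Every existing arch keeps passing
// 1.0f/sqrtf(float(n_embd_head)), while build_phi2 (later in this hunk) passes 1.0f
// because its Q is pre-multiplied by Q_scale = 1/sqrt(n_embd_head), trading the
// KQ-time scaling for a Q-time one to avoid fp16 precision issues in the KQ product.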
llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4433,9 +4532,9 @@ struct llm_build_context { // apply ALiBi for 13B model const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f; - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4557,9 +4656,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4657,9 +4756,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4866,9 +4965,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); // TODO: not tested, could be broken - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4957,9 +5056,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5054,9 +5153,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5148,9 +5247,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il); + Qcur, KQ_scale, KQ_mask, 
n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5261,9 +5360,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5320,15 +5419,15 @@ struct llm_build_context { cb(inpL, "inp_embd", -1); // inp_pos - contains the positions - struct ggml_tensor * inp_pos= ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); // KQ_scale - struct ggml_tensor * KQ_scale= ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask= ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); // shift the entire K-cache if needed @@ -5378,9 +5477,9 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, hparams, kv_self, + cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5422,6 +5521,122 @@ struct llm_build_context { ggml_build_forward_expand(gf, cur); + return gf; + } + struct ggml_cgraph * build_phi2() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); + + struct ggml_tensor * cur; + struct ggml_tensor * attn_norm_output; + struct ggml_tensor * ffn_output; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); + + // Q_scale + struct ggml_tensor * Q_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(Q_scale, "Q_scale", -1); + + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); + + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } + + for (int il = 0; il < n_layer; ++il) { + attn_norm_output = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(attn_norm_output, "attn_norm", il); + + // self-attention + { + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + struct ggml_tensor * Qcur = 
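// [editor's note] layout of the fused wqkv output split just below; the 2-D views
// take byte offsets, hence the sizeof(float) factors:
//     Q: columns [0, n_embd)                                   offset 0
//     K: columns [n_embd, n_embd + n_embd_gqa)                 offset sizeof(float)*n_embd
//     V: columns [n_embd + n_embd_gqa, n_embd + 2*n_embd_gqa)  offset sizeof(float)*(n_embd + n_embd_gqa)
// (for phi-2 n_head_kv == n_head, so n_embd_gqa == n_embd)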
ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_custom( + ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Qcur = ggml_scale(ctx0, Qcur, Q_scale); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_custom( + ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); + + cur = llm_build_kqv(ctx0, model, hparams, kv_self, + model.layers[il].wo, model.layers[il].bo, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il); + cb(cur, "kqv_out", il); + } + + // FF + { + ffn_output = llm_build_ffn(ctx0, attn_norm_output, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(ffn_output, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_output); + cb(cur, "l_out", il); + + cur = ggml_add(ctx0, cur, inpL); + cb(cur, "l_out", il); + + inpL = cur; + } + + cur = llm_build_norm(ctx0, inpL, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); + + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output_no_bias", -1); + + cur = ggml_add(ctx0, cur, model.output_b); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + return gf; } }; @@ -5437,7 +5652,7 @@ enum llm_offload_func_e { OFFLOAD_FUNC_FRC, // force offload OFFLOAD_FUNC_KQV, OFFLOAD_FUNC_NR, - OFFLOAD_FUNC_EMB, + OFFLOAD_FUNC_EMB, // embeddings OFFLOAD_FUNC_OUT, }; @@ -5522,6 +5737,7 @@ static const std::unordered_map k_offload_map { "pos_embd", OFFLOAD_FUNC_NR }, { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. 
rope) + { "Q_scale", OFFLOAD_FUNC_FRC }, { "KQ_scale", OFFLOAD_FUNC_FRC }, { "KQ_mask", OFFLOAD_FUNC_FRC }, { "K_shift", OFFLOAD_FUNC_FRC }, @@ -5606,6 +5822,7 @@ static const std::unordered_map k_offload_map { "l_out", OFFLOAD_FUNC }, { "result_norm", OFFLOAD_FUNC_EMB }, + { "result_output_no_bias", OFFLOAD_FUNC_EMB }, { "result_output", OFFLOAD_FUNC_OUT }, }; @@ -5623,6 +5840,7 @@ static struct ggml_cgraph * llama_build_graph( bool alloc_inp_tokens = false; bool alloc_inp_embd = false; bool alloc_inp_pos = false; + bool alloc_inp_Q_scale = false; bool alloc_inp_KQ_scale = false; bool alloc_inp_KQ_mask = false; bool alloc_inp_K_shift = false; @@ -5690,7 +5908,7 @@ static struct ggml_cgraph * llama_build_graph( alloc_inp_pos = true; } - if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) { + if (!alloc_inp_Q_scale && strcmp(name, "Q_scale") == 0) { ggml_allocr_alloc(lctx.alloc, cur); if (!ggml_allocr_is_measure(lctx.alloc)) { @@ -5698,6 +5916,23 @@ static struct ggml_cgraph * llama_build_graph( ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head))); } + alloc_inp_Q_scale = true; + } + + if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); + + if (!ggml_allocr_is_measure(lctx.alloc)) { + const int64_t n_embd_head = model.hparams.n_embd_head(); + if (model.arch == LLM_ARCH_PHI2) { + // with phi2, we scale the Q to avoid precision issues + // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 + ggml_set_f32(cur, 1.0f); + } else { + ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head))); + } + } + alloc_inp_KQ_scale = true; } @@ -5922,6 +6157,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_qwen(); } break; + case LLM_ARCH_PHI2: + { + result = llm.build_phi2(); + } break; default: GGML_ASSERT(false); } @@ -6055,12 +6294,16 @@ static int llama_decode_internal( ggml_allocr_alloc_graph(lctx.alloc, gf); - struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; + // the output is always the last tensor in the graph + struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; + GGML_ASSERT(strcmp(res->name, "result_output") == 0); + + // the embeddings could be the second to last tensor, or the third to last tensor struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; - - GGML_ASSERT(strcmp(res->name, "result_output") == 0); - GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); - + if (strcmp(embeddings->name, "result_norm") != 0) { + embeddings = gf->nodes[gf->n_nodes - 3]; + GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); + } #ifdef GGML_USE_CUBLAS for (int i = 0; i < gf->n_leafs; i++) { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index df2c3fb6e..f04b9438a 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1555,6 +1555,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op test_cases.emplace_back(new test_rope(type, { 64, 8, 10, 1}, 64, 2, 512)); // neox (falcon 40B) test_cases.emplace_back(new test_rope(type, { 64, 128, 10, 1}, 64, 2, 512)); // neox (falcon 40B) test_cases.emplace_back(new test_rope(type, { 80, 32, 10, 1}, 20, 2, 512)); // neox (stablelm) + test_cases.emplace_back(new test_rope(type, { 80, 32, 10, 1}, 32, 2, 512)); // neox (phi-2) } test_cases.emplace_back(new test_alibi()); From 6ff39b129d0281d045f83d515e51b7197b44b253 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 18 Dec 2023 20:05:12 +0200 Subject: [PATCH 14/84] llama.swiftui : add more 
models --- .../llama.cpp.swift/LibLlama.swift | 2 +- .../llama.swiftui/UI/ContentView.swift | 31 +++++++++++++++++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 272e1fd8a..464fb3277 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -203,7 +203,7 @@ actor LlamaContext { var pp_std: Double = 0 var tg_std: Double = 0 - for r in 0.. Date: Mon, 18 Dec 2023 20:17:43 +0200 Subject: [PATCH 15/84] llama.swiftui : add tinyllama 1.1B F16 --- .../llama.swiftui/llama.swiftui/UI/ContentView.swift | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift index 9cbe8efd6..c78f107b3 100644 --- a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift +++ b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift @@ -91,6 +91,15 @@ struct ContentView: View { ) .font(.system(size: 12)) + DownloadButton( + llamaState: llamaState, + modelName: "TinyLlama-1.1B (F16, 2.2 GiB)", + modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true", + filename: "tinyllama-1.1b-f16.gguf" + ) + .font(.system(size: 12)) + .frame(maxWidth: .infinity, alignment: .leading) + DownloadButton( llamaState: llamaState, modelName: "Phi-2.7B (Q4_0, 1.6 GiB)", @@ -98,7 +107,6 @@ struct ContentView: View { filename: "phi-2-q4_0.gguf" ) .font(.system(size: 12)) - .frame(maxWidth: .infinity, alignment: .leading) DownloadButton( llamaState: llamaState, @@ -107,6 +115,7 @@ struct ContentView: View { filename: "phi-2-q8_0.gguf" ) .font(.system(size: 12)) + .frame(maxWidth: .infinity, alignment: .leading) DownloadButton( llamaState: llamaState, @@ -115,7 +124,6 @@ struct ContentView: View { filename: "mistral-7b-v0.1.Q4_0.gguf" ) .font(.system(size: 12)) - .frame(maxWidth: .infinity, alignment: .leading) Button("Clear downloaded models") { ContentView.cleanupModelCaches() From a7aee47b98e45539d491071b25778b833b77e387 Mon Sep 17 00:00:00 2001 From: arlo-phoenix <140345165+arlo-phoenix@users.noreply.github.com> Date: Mon, 18 Dec 2023 22:33:45 +0100 Subject: [PATCH 16/84] ggml-cuda: Fix HIP build (#4528) regression of #4490 Adds defines for two new datatypes cublasComputeType_t, cudaDataType_t. Currently using deprecated hipblasDatatype_t since newer ones very recent. 
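In practice this just extends the CUDA-to-HIP alias block at the top of ggml-cuda.cu. A minimal sketch of the approach (the two defines are exactly the ones added in the diff below; the include paths are illustrative assumptions, not part of this patch):

```c
// When GGML_USE_HIPBLAS is defined, CUDA identifiers are #defined to their
// HIP/hipBLAS equivalents so the same ggml-cuda.cu source compiles for both
// backends. hipblasComputeType_t only exists in newer ROCm releases, so the
// deprecated hipblasDatatype_t stands in for both of the new type names.
#if defined(GGML_USE_HIPBLAS)
#include <hipblas/hipblas.h>   // assumed ROCm include path, for illustration
#define cublasComputeType_t hipblasDatatype_t // new hipblasComputeType_t not in 5.6
#define cudaDataType_t      hipblasDatatype_t // deprecated, but available everywhere
#else
#include <cublas_v2.h>         // regular CUDA build path
#endif
```

With the aliases in place, call sites that name `cublasComputeType_t` or `cudaDataType_t` compile unchanged on ROCm.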
--- ggml-cuda.cu | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index d0f3d8034..f20846fef 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -31,6 +31,7 @@ #define CUDA_R_16F HIPBLAS_R_16F #define CUDA_R_32F HIPBLAS_R_32F #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) +#define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6 #define cublasCreate hipblasCreate #define cublasGemmEx hipblasGemmEx #define cublasGemmBatchedEx hipblasGemmBatchedEx @@ -40,6 +41,7 @@ #define cublasSetStream hipblasSetStream #define cublasSgemm hipblasSgemm #define cublasStatus_t hipblasStatus_t +#define cudaDataType_t hipblasDatatype_t //deprecated, new hipblasDatatype not in 5.6 #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess From 328b83de23b33240e28f4e74900d1d06726f5eb1 Mon Sep 17 00:00:00 2001 From: Eric Sommerlade Date: Tue, 19 Dec 2023 16:17:01 +0000 Subject: [PATCH 17/84] ggml : fixed check for _MSC_VER (#4535) Co-authored-by: Eric Sommerlade --- ggml.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.h b/ggml.h index f1003984f..beacdc8be 100644 --- a/ggml.h +++ b/ggml.h @@ -303,7 +303,7 @@ extern "C" { #if defined(__ARM_NEON) && defined(__CUDACC__) typedef half ggml_fp16_t; -#elif defined(__ARM_NEON) +#elif defined(__ARM_NEON) && !defined(_MSC_VER) typedef __fp16 ggml_fp16_t; #else typedef uint16_t ggml_fp16_t; From 799fc2268989482054944c902874cca76337580f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Wed, 20 Dec 2023 15:41:22 +0100 Subject: [PATCH 18/84] CUDA: Faster Mixtral prompt processing (#4538) * CUDA: make MoE tensors contiguous for batch size>1 * Update ggml-cuda.cu Co-authored-by: slaren --------- Co-authored-by: slaren --- ggml-cuda.cu | 118 ++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 93 insertions(+), 25 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f20846fef..9f4b188cb 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7830,6 +7830,11 @@ static void ggml_cuda_set_peer_access(const int n_tokens) { } #ifdef NDEBUG + for (int id = 0; id < g_device_count; ++id) { + CUDA_CHECK(ggml_cuda_set_device(id)); + CUDA_CHECK(cudaDeviceSynchronize()); + } + for (int id = 0; id < g_device_count; ++id) { CUDA_CHECK(ggml_cuda_set_device(id)); @@ -7881,8 +7886,6 @@ static void ggml_cuda_op_mul_mat( const int nb2 = dst->nb[2]; const int nb3 = dst->nb[3]; - ggml_cuda_set_peer_access(ne11); - GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT); GGML_ASSERT(src1->backend != GGML_BACKEND_GPU_SPLIT); @@ -8781,16 +8784,21 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(dst->backend == GGML_BACKEND_GPU); + const int64_t nb11 = src1->nb[1]; + const int64_t nb1 = dst->nb[1]; + const struct ggml_tensor * ids = src0; const int32_t id = ((int32_t *) dst->op_params)[0]; const int32_t n_as = ((int32_t *) dst->op_params)[1]; std::vector ids_host(ggml_nbytes(ids)); + const cudaStream_t stream = g_cudaStreams[g_main_device][0]; + if (ids->backend == GGML_BACKEND_GPU) { const char * ids_dev = (const char *)((const ggml_tensor_extra_gpu *)ids->extra)->data_device[g_main_device]; - CUDA_CHECK(cudaMemcpyAsync(ids_host.data(), ids_dev, ggml_nbytes(ids), cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0])); - 
CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[g_main_device][0])); + CUDA_CHECK(cudaMemcpyAsync(ids_host.data(), ids_dev, ggml_nbytes(ids), cudaMemcpyDeviceToHost, stream)); + CUDA_CHECK(cudaStreamSynchronize(stream)); } else { memcpy(ids_host.data(), ids->data, ggml_nbytes(ids)); } @@ -8804,37 +8812,93 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s ggml_tensor src1_row = *src1; ggml_tensor dst_row = *dst; - src1_row.ne[1] = 1; - dst_row.ne[1] = 1; - - src1_row.nb[2] = src1_row.nb[1]; - dst_row.nb[2] = dst_row.nb[1]; - - src1_row.nb[3] = src1_row.nb[1]; - dst_row.nb[3] = dst_row.nb[1]; - src1_row.extra = &src1_row_extra; dst_row.extra = &dst_row_extra; + char * src1_original = (char *) src1_extra->data_device[g_main_device]; + char * dst_original = (char *) dst_extra->data_device[g_main_device]; - for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { - //int32_t row_id; - //CUDA_CHECK(cudaMemcpyAsync(&row_id, ids_dev + i01*ids->nb[1] + id*ids->nb[0], sizeof(int32_t), cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0])); - //CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[g_main_device][0])); + if (src1->ne[1] == 1) { + for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { + //int32_t row_id; + //CUDA_CHECK(cudaMemcpyAsync(&row_id, ids_dev + i01*ids->nb[1] + id*ids->nb[0], sizeof(int32_t), cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0])); + //CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[g_main_device][0])); - const int32_t row_id = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); + const int32_t row_id = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); - GGML_ASSERT(row_id >= 0 && row_id < n_as); + GGML_ASSERT(row_id >= 0 && row_id < n_as); - const struct ggml_tensor * src0_row = dst->src[row_id + 2]; + const struct ggml_tensor * src0_row = dst->src[row_id + 2]; - src1_row_extra.data_device[g_main_device] = (char *) src1_extra->data_device[g_main_device] + i01*src1->nb[1]; - src1_row.data = (char *) src1->data + i01*src1->nb[1]; + src1_row_extra.data_device[g_main_device] = src1_original + i01*src1->nb[1]; + src1_row.data = (char *) src1->data + i01*src1->nb[1]; // TODO why is this set? - dst_row_extra.data_device[g_main_device] = (char *) dst_extra->data_device[g_main_device] + i01*dst->nb[1]; - dst_row.data = (char *) dst->data + i01*dst->nb[1]; + dst_row_extra.data_device[g_main_device] = dst_original + i01*dst->nb[1]; + dst_row.data = (char *) dst->data + i01*dst->nb[1]; // TODO why is this set? 
- ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); + ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); + } + } else { + size_t as_src1, as_dst; + char * src1_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(src1), &as_src1); + char * dst_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(dst), &as_dst); + + src1_row_extra.data_device[g_main_device] = src1_contiguous; + dst_row_extra.data_device[g_main_device] = dst_contiguous; + + for (int32_t row_id = 0; row_id < n_as; ++row_id) { + const struct ggml_tensor * src0_row = dst->src[row_id + 2]; + + int64_t num_src1_rows = 0; + for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { + const int32_t row_id_i = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); + + if (row_id_i != row_id) { + continue; + } + + GGML_ASSERT(row_id >= 0 && row_id < n_as); + + CUDA_CHECK(cudaMemcpyAsync(src1_contiguous + num_src1_rows*nb11, src1_original + i01*nb11, + nb11, cudaMemcpyDeviceToDevice, stream)); + num_src1_rows++; + } + + if (num_src1_rows == 0) { + continue; + } + + src1_row.ne[1] = num_src1_rows; + dst_row.ne[1] = num_src1_rows; + + src1_row.nb[1] = nb11; + src1_row.nb[2] = num_src1_rows*nb11; + src1_row.nb[3] = num_src1_rows*nb11; + + dst_row.nb[1] = nb1; + dst_row.nb[2] = num_src1_rows*nb1; + dst_row.nb[3] = num_src1_rows*nb1; + + ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); + + num_src1_rows = 0; + for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { + const int32_t row_id_i = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); + + if (row_id_i != row_id) { + continue; + } + + GGML_ASSERT(row_id >= 0 && row_id < n_as); + + CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous + num_src1_rows*nb1, + nb1, cudaMemcpyDeviceToDevice, stream)); + num_src1_rows++; + } + } + + ggml_cuda_pool_free(src1_contiguous, as_src1); + ggml_cuda_pool_free(dst_contiguous, as_dst); } } @@ -9370,6 +9434,10 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ return false; } + if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT) { + ggml_cuda_set_peer_access(tensor->src[1]->ne[1]); + } + if (params->ith != 0) { return true; } From 1d7a1912cea2227f9a1a449758ed622c560542f9 Mon Sep 17 00:00:00 2001 From: LoganDark Date: Thu, 21 Dec 2023 01:59:27 -0800 Subject: [PATCH 19/84] Fix access violation in ggml_cuda_free_data if tensor->extra is NULL (#4554) --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 9f4b188cb..28d378784 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9091,7 +9091,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { } void ggml_cuda_free_data(struct ggml_tensor * tensor) { - if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) { + if (!tensor || !tensor->extra || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) { return; } From d3223afdad0ed2821a8ddf739c291cd410c92a11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Thu, 21 Dec 2023 17:34:17 +0100 Subject: [PATCH 20/84] llama : disable per-tensor info prints on model load (#4562) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index edd2910b3..90d860eb9 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2083,7 +2083,7 @@ struct llama_model_loader { type_max = meta->type; } - LLAMA_LOG_INFO("%s: - tensor %4d: 
%32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); } switch (type_max) { From 139882392258671ffe5acdfcadc0bc08572d6eef Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 21 Dec 2023 18:02:30 +0100 Subject: [PATCH 21/84] cuda : replace asserts in wrong architecture checks with __trap (#4556) * cuda : replace asserts in wrong architecture checks with __trap * make bad_arch noreturn, remove returns --- ggml-cuda.cu | 82 +++++++++++++++++++++++----------------------------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 28d378784..e7c9dee45 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -512,6 +512,14 @@ static size_t g_scratch_offset = 0; static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; +[[noreturn]] +static __device__ void bad_arch() { + printf("ERROR: ggml-cuda was compiled without support for the current GPU architecture.\n"); + __trap(); + + (void) bad_arch; // suppress unused function warning +} + static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { @@ -1972,8 +1980,7 @@ template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_imp // second part effectively subtracts 8 from each quant value return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2010,8 +2017,7 @@ template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_imp // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2046,8 +2052,7 @@ template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_imp // second part effectively subtracts 16 from each quant value return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2092,8 +2097,7 @@ template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_imp return sumi*d5d8 + m5s8 / (QI5_1 / vdr); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2114,8 +2118,7 @@ template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_imp return d8_0*d8_1 * sumi; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2145,8 +2148,7 @@ template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_imp // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it return sumi*d8d8 + m8s8 / (QI8_1 / vdr); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2181,8 +2183,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( return dm2f.x*sumf_d - dm2f.y*sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2219,8 +2220,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( return 
d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m); #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2260,8 +2260,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( return d3 * sumf; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2286,8 +2285,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( return d3*d8 * sumi; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2320,8 +2318,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( return dm4f.x*sumf_d - dm4f.y*sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2354,8 +2351,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( return dm4f.x*sumf_d - dm4f.y*sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2395,8 +2391,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( return dm5f.x*sumf_d - dm5f.y*sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2429,8 +2424,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( return dm4f.x*sumf_d - dm4f.y*sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2460,8 +2454,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( return d*sumf; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -2492,8 +2485,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( return d6 * sumf_d; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } @@ -3359,8 +3351,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1( return dall * sumf_d - dmin * sumf_m; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A #endif @@ -3543,8 +3534,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1( return d * sumf_d; #else - assert(false); - return 0.0f; // only to satisfy the compiler + bad_arch(); #endif // __CUDA_ARCH__ >= MIN_CC_DP4A #endif @@ -3954,7 +3944,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q4_0_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4023,7 +4013,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q4_1_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4090,7 +4080,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q5_0_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4157,7 +4147,7 @@ mul_mat_q5_1( (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q5_1_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4224,7 +4214,7 @@ template static __global__ void (vx, vy, dst, ncols_x, 
nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q8_0_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4291,7 +4281,7 @@ mul_mat_q2_K( (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q2_K_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4360,7 +4350,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q3_K_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4429,7 +4419,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q4_K_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4496,7 +4486,7 @@ mul_mat_q5_K( (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q5_K_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } @@ -4565,7 +4555,7 @@ template static __global__ void (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); #else (void) vec_dot_q6_K_q8_1_mul_mat; - assert(false); + bad_arch(); #endif // __CUDA_ARCH__ >= CC_VOLTA } From 66f35a2f48e1965a13835a523e677223dbf148be Mon Sep 17 00:00:00 2001 From: bobqianic <129547291+bobqianic@users.noreply.github.com> Date: Thu, 21 Dec 2023 17:06:44 +0000 Subject: [PATCH 22/84] cuda : better error message for ggml_get_rows (#4561) * Update ggml-cuda.cu * Update ggml-cuda.cu * Update ggml-cuda.cu --------- Co-authored-by: Georgi Gerganov --- ggml-cuda.cu | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index e7c9dee45..1ca071d90 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6815,6 +6815,7 @@ static void ggml_cuda_op_get_rows( break; default: // TODO: k-quants + fprintf(stderr, "%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type)); GGML_ASSERT(false); break; } From 880e352277fc017df4d5794f0c21c44e1eae2b84 Mon Sep 17 00:00:00 2001 From: howlger Date: Thu, 21 Dec 2023 18:07:34 +0100 Subject: [PATCH 23/84] py : open merges file as 'utf-8' (#4566) Otherwise, on Windows converting bling-phi-2-v0 () via convert-hf-to-gguf.py will fail with the following error: ``` Traceback (most recent call last): File "C:\Users\User\git\gguf\convert-hf-to-gguf.py", line 1061, in model_instance.set_vocab() File "C:\Users\User\git\gguf\convert-hf-to-gguf.py", line 52, in set_vocab self._set_vocab_gpt2() File "C:\Users\User\git\gguf\convert-hf-to-gguf.py", line 264, in _set_vocab_gpt2 special_vocab = gguf.SpecialVocab(dir_model, load_merges=True) File "C:\Users\User\git\gguf\gguf\vocab.py", line 33, in __init__ self._load(Path(path)) File "C:\Users\User\git\gguf\gguf\vocab.py", line 81, in _load self._try_load_merges_txt(path) File "C:\Users\User\git\gguf\gguf\vocab.py", line 95, in _try_load_merges_txt for line in fp: File "C:\Users\User\miniconda3\envs\gguf\lib\encodings\cp1252.py", line 23, in decode return codecs.charmap_decode(input,self.errors,decoding_table)[0] UnicodeDecodeError: 'charmap' codec can't decode byte 0x81 in position 1415: character maps to ``` --- gguf-py/gguf/vocab.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 76924d8f2..cd1942975 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -84,7 +84,7 @@ class SpecialVocab: merges_file = path / 'merges.txt' if not merges_file.is_file(): return False - with open(merges_file, 'r') as fp: + 
with open(merges_file, 'r', encoding = 'utf-8') as fp: first_line = next(fp, '').strip() if not first_line.startswith('#'): fp.seek(0) From c083718c895b7c8c7fb2a4660643fb78d0c64dfd Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 21 Dec 2023 19:27:14 +0200 Subject: [PATCH 24/84] readme : update coding guidelines --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 01aef2afc..80ce194ca 100644 --- a/README.md +++ b/README.md @@ -982,6 +982,8 @@ docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m / - There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit - Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a` - See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions +- Tensors store data in row-major order. We refer to dimension 0 as columns, 1 as rows, 2 as matrices +- Matrix multiplication is unconventional: [`z = ggml_mul_mat(ctx, x, y)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means `zT = x @ yT` ### Docs From 9154494808dc865475c59022c29060b4947a803b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Thu, 21 Dec 2023 18:42:59 +0100 Subject: [PATCH 25/84] CUDA: mul_mat_id always on GPU for batches >= 32 (#4553) --- ggml-cuda.cu | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 1ca071d90..036668bfd 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -8773,8 +8773,6 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s // TODO: mmq/mmv support #endif - GGML_ASSERT(dst->backend == GGML_BACKEND_GPU); - const int64_t nb11 = src1->nb[1]; const int64_t nb1 = dst->nb[1]; @@ -8803,13 +8801,21 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s ggml_tensor src1_row = *src1; ggml_tensor dst_row = *dst; + src1_row.backend = GGML_BACKEND_GPU; + dst_row.backend = GGML_BACKEND_GPU; + src1_row.extra = &src1_row_extra; dst_row.extra = &dst_row_extra; - char * src1_original = (char *) src1_extra->data_device[g_main_device]; - char * dst_original = (char *) dst_extra->data_device[g_main_device]; + char * src1_original = src1->backend == GGML_BACKEND_CPU ? + (char *) src1->data : (char *) src1_extra->data_device[g_main_device]; + char * dst_original = dst->backend == GGML_BACKEND_CPU ? + (char *) dst->data : (char *) dst_extra->data_device[g_main_device]; if (src1->ne[1] == 1) { + GGML_ASSERT(src1->backend == GGML_BACKEND_GPU); + GGML_ASSERT(dst->backend == GGML_BACKEND_GPU); + for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { //int32_t row_id; //CUDA_CHECK(cudaMemcpyAsync(&row_id, ids_dev + i01*ids->nb[1] + id*ids->nb[0], sizeof(int32_t), cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0])); @@ -8837,6 +8843,11 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s src1_row_extra.data_device[g_main_device] = src1_contiguous; dst_row_extra.data_device[g_main_device] = dst_contiguous; + const cudaMemcpyKind src1_kind = src1->backend == GGML_BACKEND_CPU ? + cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; + const cudaMemcpyKind dst_kind = dst->backend == GGML_BACKEND_CPU ? 
+ cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; + for (int32_t row_id = 0; row_id < n_as; ++row_id) { const struct ggml_tensor * src0_row = dst->src[row_id + 2]; @@ -8851,7 +8862,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); CUDA_CHECK(cudaMemcpyAsync(src1_contiguous + num_src1_rows*nb11, src1_original + i01*nb11, - nb11, cudaMemcpyDeviceToDevice, stream)); + nb11, src1_kind, stream)); num_src1_rows++; } @@ -8883,7 +8894,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous + num_src1_rows*nb1, - nb1, cudaMemcpyDeviceToDevice, stream)); + nb1, dst_kind, stream)); num_src1_rows++; } } @@ -8891,6 +8902,10 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s ggml_cuda_pool_free(src1_contiguous, as_src1); ggml_cuda_pool_free(dst_contiguous, as_dst); } + + if (dst->backend == GGML_BACKEND_CPU) { + CUDA_CHECK(cudaStreamSynchronize(stream)); + } } static void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -9289,7 +9304,7 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ || (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) || (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_GPU); - if (!any_on_device && tensor->op != GGML_OP_MUL_MAT) { + if (!any_on_device && tensor->op != GGML_OP_MUL_MAT && tensor->op != GGML_OP_MUL_MAT_ID) { return false; } From 8fe03ffddaaa0ab5d48feaafe398151c9f22d4f6 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Thu, 21 Dec 2023 12:55:34 -0500 Subject: [PATCH 26/84] common : remove incorrect --model-draft default (#4568) --- common/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/common.cpp b/common/common.cpp index 93d5483e4..b3425ab09 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -920,7 +920,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -m FNAME, --model FNAME\n"); printf(" model path (default: %s)\n", params.model.c_str()); printf(" -md FNAME, --model-draft FNAME\n"); - printf(" draft model for speculative decoding (default: %s)\n", params.model.c_str()); + printf(" draft model for speculative decoding\n"); printf(" -ld LOGDIR, --logdir LOGDIR\n"); printf(" path under which to save YAML logs (no logging if unset)\n"); printf(" --override-kv KEY=TYPE:VALUE\n"); From 562cf222b5129e40b312877e928eac3a02e4ec33 Mon Sep 17 00:00:00 2001 From: arlo-phoenix <140345165+arlo-phoenix@users.noreply.github.com> Date: Thu, 21 Dec 2023 20:13:25 +0100 Subject: [PATCH 27/84] ggml-cuda: Fix HIP build by adding define for __trap (#4569) Regression of 139882392258671ffe5acdfcadc0bc08572d6eef HIP doesn't have trap, only abort --- ggml-cuda.cu | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 036668bfd..61d92d7ef 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -80,6 +80,7 @@ #define cudaStreamWaitEvent(stream, event, flags) hipStreamWaitEvent(stream, event, flags) #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess +#define __trap abort #else #include #include From 0f630fbc924aaabeea6eaf466bb4b47d13015c3e Mon Sep 17 00:00:00 2001 From: Erik Garrison Date: Thu, 21 Dec 2023 13:45:32 -0600 Subject: [PATCH 28/84] cuda : ROCm 
AMD Unified Memory Architecture (UMA) handling (#4449) * AMD ROCm: handle UMA memory VRAM expansions This resolves #2797 by allowing ROCm AMD GPU users with a UMA to dynamically expand the VRAM allocated to the GPU. Without this, AMD ROCm users with shared CPU/GPU memory usually are stuck with the BIOS-set (or fixed) framebuffer VRAM, making it impossible to load more than 1-2 layers. Note that the model is duplicated in RAM because it's loaded once for the CPU and then copied into a second set of allocations that are managed by the HIP UMA system. We can fix this later. * clarify build process for ROCm on linux with cmake * avoid using deprecated ROCm hipMallocHost * keep simplifying the change required for UMA * cmake: enable UMA-compatible allocation when LLAMA_HIP_UMA=ON --- CMakeLists.txt | 4 ++++ README.md | 16 +++++++++------- ggml-cuda.cu | 5 +++++ 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e3cd43ab3..6fc6508c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -91,6 +91,7 @@ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING "llama: max. batch size for using peer access") option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF) +option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF) option(LLAMA_CLBLAST "llama: use CLBlast" OFF) option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT}) option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF) @@ -377,6 +378,9 @@ if (LLAMA_HIPBLAS) if (${hipblas_FOUND} AND ${hip_FOUND}) message(STATUS "HIP and hipBLAS found") add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS) + if (LLAMA_HIP_UMA) + add_compile_definitions(GGML_HIP_UMA) + endif() add_library(ggml-rocm OBJECT ggml-cuda.cu ggml-cuda.h) if (BUILD_SHARED_LIBS) set_target_properties(ggml-rocm PROPERTIES POSITION_INDEPENDENT_CODE ON) diff --git a/README.md b/README.md index 80ce194ca..73fe59bb4 100644 --- a/README.md +++ b/README.md @@ -432,14 +432,15 @@ Building the program with BLAS support may lead to some performance improvements ```bash make LLAMA_HIPBLAS=1 ``` - - Using `CMake` for Linux: + - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU): ```bash - mkdir build - cd build - CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ cmake .. -DLLAMA_HIPBLAS=ON - cmake --build . + CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ \ + cmake -H. -Bbuild -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ + && cmake --build build -- -j 16 ``` - - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS): + On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON"`. + However, this hurts performance for non-integrated GPUs. + - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU): ```bash set PATH=%HIP_PATH%\bin;%PATH% mkdir build @@ -448,10 +449,11 @@ Building the program with BLAS support may lead to some performance improvements cmake --build . ``` Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100` that corresponds to Radeon RX 7900XTX/XT/GRE. 
You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors) + Find your gpu version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`. The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. - If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 or 11.0.0 on RDNA3. + If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3. The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because it uses the same code as the cuBLAS version above): | Option | Legal values | Default | Description | diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 61d92d7ef..32603a8d1 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -60,8 +60,13 @@ #define cudaGetDeviceProperties hipGetDeviceProperties #define cudaGetErrorString hipGetErrorString #define cudaGetLastError hipGetLastError +#ifdef GGML_HIP_UMA +#define cudaMalloc hipMallocManaged +#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size) +#else #define cudaMalloc hipMalloc #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) +#endif #define cudaMemcpy hipMemcpy #define cudaMemcpy2DAsync hipMemcpy2DAsync #define cudaMemcpyAsync hipMemcpyAsync From 56fa50819f7a3ca2128f63b81c17c08a4454479e Mon Sep 17 00:00:00 2001 From: Finn Voorhees Date: Thu, 21 Dec 2023 14:55:02 -0500 Subject: [PATCH 29/84] metal : fix `ggml_metal_log` vargs (#4373) From 31f27758faf4a4bd08101a57c7ec3a473f771f86 Mon Sep 17 00:00:00 2001 From: Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com> Date: Thu, 21 Dec 2023 11:57:48 -0800 Subject: [PATCH 30/84] llama : allow getting n_batch from llama_context in c api (#4540) * allowed getting n_batch from llama_context in c api * changed to use `uint32_t` instead of `int` * changed to use `uint32_t` instead of `int` in `llama_n_ctx` * Update llama.h --------- Co-authored-by: Georgi Gerganov --- llama.cpp | 6 +++++- llama.h | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/llama.cpp b/llama.cpp index 90d860eb9..63ebe581b 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9532,10 +9532,14 @@ const llama_model * llama_get_model(const struct llama_context * ctx) { return &ctx->model; } -int llama_n_ctx(const struct llama_context * ctx) { +uint32_t llama_n_ctx(const struct llama_context * ctx) { return ctx->cparams.n_ctx; } +uint32_t llama_n_batch(const struct llama_context * ctx) { + return ctx->cparams.n_batch; +} + enum llama_vocab_type llama_vocab_type(const struct llama_model * model) { return model->vocab.type; } diff --git a/llama.h b/llama.h index 15ab4f80e..0be4b1337 100644 --- a/llama.h +++ b/llama.h @@ -314,7 +314,9 @@ extern "C" { LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); - LLAMA_API int llama_n_ctx (const struct llama_context * ctx); + // TODO: become more consistent with returned int types across the API + LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx); + LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx); LLAMA_API enum 
llama_vocab_type llama_vocab_type(const struct llama_model * model); From d232aca5a73b290e218a2e48b91023d5e994203f Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 21 Dec 2023 21:07:46 +0100 Subject: [PATCH 31/84] llama : initial ggml-backend integration (#4520) * llama : initial ggml-backend integration * add ggml-metal * cuda backend can be used through ggml-backend with LLAMA_GGML_BACKEND_CUDA_TEST; access all tensor data with ggml_backend_tensor_get/set * add ggml_backend_buffer_clear; zero-init KV cache buffer * add ggml_backend_buffer_is_host, used to avoid copies if possible when accessing tensor data * disable gpu backends with ngl 0 * more accurate mlock * unmap offloaded part of the model * use posix_fadvise64(.., POSIX_FADV_SEQUENTIAL) to improve performance with mmap * update quantize and lora * update session copy/set to use ggml-backend ggml-ci * use posix_fadvise instead of posix_fadvise64 * ggml_backend_alloc_ctx_tensors_from_buft : remove old print * llama_mmap::align_offset : use pointers instead of references for out parameters * restore progress_callback behavior * move final progress_callback call to load_all_data * cuda : fix fprintf format string (minor) * do not offload scales * llama_mmap : avoid unmapping the same fragments again in the destructor * remove unnecessary unmap * metal : add default log function that prints to stderr, cleanup code ggml-ci --------- Co-authored-by: Georgi Gerganov --- Makefile | 2 +- ggml-alloc.c | 16 +- ggml-backend-impl.h | 20 +- ggml-backend.c | 80 ++- ggml-backend.h | 7 + ggml-cuda.cu | 89 ++-- ggml-metal.h | 3 + ggml-metal.m | 228 +++++++-- ggml.c | 24 +- ggml.h | 13 +- llama.cpp | 1196 ++++++++++++++++++++----------------- 11 files changed, 926 insertions(+), 752 deletions(-) diff --git a/Makefile b/Makefile index 8273f8400..512407a1d 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,7 @@ test: $(TEST_TARGETS) ./$$test_target; \ fi; \ if [ $$?
-ne 0 ]; then \ - printf 'Test $$test_target FAILED!\n\n' $$test_target; \ + printf 'Test %s FAILED!\n\n' $$test_target; \ failures=$$(( failures + 1 )); \ else \ printf 'Test %s passed.\n\n' $$test_target; \ diff --git a/ggml-alloc.c b/ggml-alloc.c index d3049efb4..a97436b17 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -449,11 +449,10 @@ static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool upd if (update_backend) { view->backend = view->view_src->backend; } - view->buffer = view->view_src->buffer; + // views are initialized in the alloc buffer rather than the view_src buffer + view->buffer = alloc->buffer; view->data = (char *)view->view_src->data + view->view_offs; - // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend - // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft); if (!alloc->measure) { @@ -736,6 +735,10 @@ void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n) { } void ggml_allocr_free(ggml_allocr_t alloc) { + if (alloc == NULL) { + return; + } + ggml_gallocr_free(alloc->galloc); ggml_tallocr_free(alloc->talloc); free(alloc); @@ -775,7 +778,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte } if (nbytes == 0) { - fprintf(stderr, "%s: no tensors to allocate\n", __func__); + // all the tensors in the context are already allocated return NULL; } @@ -789,6 +792,11 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte } else { ggml_backend_view_init(buffer, t); } + } else { + if (t->view_src != NULL) { + // view of a pre-allocated tensor + ggml_backend_view_init(buffer, t); + } } } diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h index f588af602..05859935a 100644 --- a/ggml-backend-impl.h +++ b/ggml-backend-impl.h @@ -20,6 +20,9 @@ extern "C" { size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding bool (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend + // check if tensor data is in host memory + // should be equivalent to supports_backend(buft, ggml_backend_cpu_init()) + bool (*is_host) (ggml_backend_buffer_type_t buft); }; struct ggml_backend_buffer_type { @@ -31,15 +34,16 @@ extern "C" { typedef void * ggml_backend_buffer_context_t; struct ggml_backend_buffer_i { - void (*free_buffer)(ggml_backend_buffer_t buffer); + void (*free_buffer) (ggml_backend_buffer_t buffer); //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras - void * (*get_base) (ggml_backend_buffer_t buffer); - void (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + void * (*get_base) (ggml_backend_buffer_t buffer); + void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t 
size); + void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // (optional) copy tensor between different buffer-type, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); }; struct ggml_backend_buffer { @@ -78,7 +82,7 @@ extern "C" { void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); void (*cpy_tensor_to_async) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*synchronize) (ggml_backend_t backend); + void (*synchronize)(ggml_backend_t backend); // compute graph with a plan ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph); diff --git a/ggml-backend.c b/ggml-backend.c index 3a22cd085..0c8c9ec43 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -35,6 +35,13 @@ bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_ba return buft->iface.supports_backend(buft, backend); } +bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) { + if (buft->iface.is_host) { + return buft->iface.is_host(buft); + } + return false; +} + // backend buffer ggml_backend_buffer_t ggml_backend_buffer_init( @@ -94,6 +101,14 @@ size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct g return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type(buffer), tensor); } +void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + buffer->iface.clear(buffer, value); +} + +bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { + return ggml_backend_buft_is_host(ggml_backend_buffer_type(buffer)); +} + ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer) { return buffer->buft; } @@ -378,7 +393,6 @@ static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { free(buffer->context); - GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { @@ -411,6 +425,10 @@ static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, GGML_UNUSED(buffer); } +static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + memset(buffer->context, value, buffer->size); +} + static struct ggml_backend_buffer_i cpu_backend_buffer_i = { /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, /* .get_base = */ ggml_backend_cpu_buffer_get_base, @@ -419,6 +437,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i = { /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, + /* .clear = */ ggml_backend_cpu_buffer_clear, }; // for buffers from ptr, free is not called @@ -430,6 +449,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { /* 
.get_tensor = */ ggml_backend_cpu_buffer_get_tensor, /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, + /* .clear = */ ggml_backend_cpu_buffer_clear, }; static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512 @@ -455,20 +475,70 @@ static bool ggml_backend_cpu_buffer_type_supports_backend(ggml_backend_buffer_ty GGML_UNUSED(buft); } +static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + return true; + + GGML_UNUSED(buft); +} + ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_cpu = { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { /* .iface = */ { /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend, + /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, }, /* .context = */ NULL, }; - return &ggml_backend_buffer_type_cpu; + return &ggml_backend_cpu_buffer_type; } +#ifdef GGML_USE_CPU_HBM + +// buffer type HBM + +#include + +static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) { + hbw_free(buffer->context); +} + +static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + //void * ptr = hbw_malloc(size); + void * ptr; + int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size); + if (result != 0) { + fprintf(stderr, "failed to allocate HBM buffer of size %zu\n", size); + return NULL; + } + + // FIXME: this is a hack to avoid having to implement a new buffer type + ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); + buffer->buft = buft; + buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer; + + return buffer; +} + +ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type() { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = { + /* .iface = */ { + /* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, + /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes + /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend, + /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, + }, + /* .context = */ NULL, + }; + + return &ggml_backend_cpu_buffer_type_hbm; +} +#endif + struct ggml_backend_cpu_context { int n_threads; void * work_data; @@ -505,7 +575,7 @@ static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu)); cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads); - cpu_plan->cgraph = *cgraph; + cpu_plan->cgraph = *cgraph; // FIXME: deep copy if (cpu_plan->cplan.work_size > 0) { cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size); @@ -1180,7 +1250,7 @@ void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml // utils void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { GGML_ASSERT(tensor->buffer == NULL); - GGML_ASSERT(tensor->data == NULL); + //GGML_ASSERT(tensor->data == NULL); // views of pre-allocted tensors may have the data set, but still need to be initialized 
GGML_ASSERT(tensor->view_src != NULL); GGML_ASSERT(tensor->view_src->buffer != NULL); GGML_ASSERT(tensor->view_src->data != NULL); diff --git a/ggml-backend.h b/ggml-backend.h index 58d5ccae6..a9d2fddd7 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -21,6 +21,7 @@ extern "C" { GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend); + GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); // buffer GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); @@ -29,6 +30,8 @@ extern "C" { GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); + GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer); // @@ -76,6 +79,10 @@ extern "C" { GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void); +#ifdef GGML_USE_CPU_HBM + GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); +#endif + // // Backend registry // diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 32603a8d1..f5e060d32 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9081,7 +9081,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { char * buf; CUDA_CHECK(cudaMalloc(&buf, size)); - char * buf_host = (char*)data + offset_split; + char * buf_host = (char *)data + offset_split; // set padding to 0 to avoid possible NaN values if (size > original_size) { @@ -9226,11 +9226,10 @@ void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra(); - const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) || - tensor->op == GGML_OP_VIEW; + const bool inplace = tensor->view_src != nullptr; - if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) { - ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra; + if (inplace && (tensor->view_src->backend == GGML_BACKEND_GPU || tensor->view_src->backend == GGML_BACKEND_GPU_SPLIT)) { + ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->view_src->extra; char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; size_t view_offset = 0; if (tensor->op == GGML_OP_VIEW) { @@ -9317,7 +9316,7 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ if (tensor->op == GGML_OP_MUL_MAT) { if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) { #ifndef NDEBUG - fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = " PRId64 ", src1->ne[3] = " PRId64 " - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]); + fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = %" PRId64 ", src1->ne[3] = %" PRId64 " - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]); #endif return false; } @@ -9523,7 +9522,7 @@ static void 
ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, g ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; if (tensor->view_src != NULL && tensor->view_offs == 0) { - assert(tensor->view_src->buffer->buft == buffer->buft); // TODO + assert(tensor->view_src->buffer->buft == buffer->buft); tensor->backend = tensor->view_src->backend; tensor->extra = tensor->view_src->extra; return; @@ -9554,23 +9553,34 @@ static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, g } static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); - CUDA_CHECK(cudaMemcpy((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice)); + ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; - UNUSED(buffer); + ggml_cuda_set_device(ctx->device); + CUDA_CHECK(cudaDeviceSynchronize()); + + CUDA_CHECK(cudaMemcpy((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice)); } static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); - CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost)); + ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; - UNUSED(buffer); + ggml_cuda_set_device(ctx->device); + CUDA_CHECK(cudaDeviceSynchronize()); + + CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost)); +} + +static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + + ggml_cuda_set_device(ctx->device); + CUDA_CHECK(cudaDeviceSynchronize()); + + CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size)); } static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { @@ -9581,6 +9591,7 @@ static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { /* .get_tensor = */ ggml_backend_cuda_buffer_get_tensor, /* .cpy_tensor_from = */ NULL, /* .cpy_tensor_to = */ NULL, + /* .clear = */ ggml_backend_cuda_buffer_clear, }; // cuda buffer type @@ -9632,35 +9643,36 @@ static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_t UNUSED(buft); } -static ggml_backend_buffer_type_i cuda_backend_buffer_type_interface = { +static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { /* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment, /* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size, /* .supports_backend = */ ggml_backend_cuda_buffer_type_supports_backend, + /* .is_host = */ nullptr, }; ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_cuda[GGML_CUDA_MAX_DEVICES]; - static bool ggml_backend_buffer_type_cuda_initialized = false; - if 
(!ggml_backend_buffer_type_cuda_initialized) { + static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; + + static bool ggml_backend_cuda_buffer_type_initialized = false; + + if (!ggml_backend_cuda_buffer_type_initialized) { for (int i = 0; i < GGML_CUDA_MAX_DEVICES; i++) { - ggml_backend_buffer_type_cuda[i] = { - /* .iface = */ cuda_backend_buffer_type_interface, + ggml_backend_cuda_buffer_types[i] = { + /* .iface = */ ggml_backend_cuda_buffer_type_interface, /* .context = */ (ggml_backend_buffer_type_context_t) (intptr_t) i, }; } - ggml_backend_buffer_type_cuda_initialized = true; + ggml_backend_cuda_buffer_type_initialized = true; } - return &ggml_backend_buffer_type_cuda[device]; + return &ggml_backend_cuda_buffer_types[device]; } // host buffer type static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; - CUDA_CHECK(cudaFreeHost(ctx->dev_ptr)); - delete ctx; + CUDA_CHECK(cudaFreeHost(buffer->context)); } static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { @@ -9673,24 +9685,21 @@ static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggm buffer->iface.free_buffer = ggml_backend_cuda_host_buffer_free_buffer; return buffer; - - UNUSED(buft); } -struct ggml_backend_buffer_type_i cuda_backend_host_buffer_type_interface = { - /* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, - /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, - /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend, -}; - ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_cuda_host = { - /* .iface = */ cuda_backend_host_buffer_type_interface, + static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_type_host = { + /* .iface = */ { + /* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, + /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, + /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend, + /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, + }, /* .context = */ nullptr, }; - return &ggml_backend_buffer_type_cuda_host; + return &ggml_backend_cuda_buffer_type_host; } // backend @@ -9722,8 +9731,6 @@ static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tens ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); CUDA_CHECK(cudaMemcpyAsync((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice, g_cudaStreams[cuda_ctx->device][0])); @@ -9733,8 +9740,6 @@ static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggm ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) 
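For reference, the renamed per-device buffer-type table follows a lazily initialized static-array pattern, with the device index stored directly in the context pointer. The same pattern in isolation (all names illustrative; as in the patch, first-call initialization is unsynchronized):

    #include <cstdint>

    struct example_buffer_type { void * context; };

    static example_buffer_type * example_buffer_type_get(int device) {
        static example_buffer_type types[16];
        static bool initialized = false;
        if (!initialized) {
            for (int i = 0; i < 16; i++) {
                types[i].context = (void *)(intptr_t) i;  // context doubles as the device index
            }
            initialized = true;
        }
        return &types[device];
    }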
&& "unsupported buffer type"); - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, g_cudaStreams[cuda_ctx->device][0])); diff --git a/ggml-metal.h b/ggml-metal.h index bf52d9cd3..b5e02b668 100644 --- a/ggml-metal.h +++ b/ggml-metal.h @@ -98,7 +98,10 @@ GGML_API ggml_backend_t ggml_backend_metal_init(void); GGML_API bool ggml_backend_is_metal(ggml_backend_t backend); +GGML_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size); + GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb); + GGML_API ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void); // helper to check if the device supports a specific family diff --git a/ggml-metal.m b/ggml-metal.m index 465679a6b..e60b93b36 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -180,7 +180,15 @@ struct ggml_metal_context { @implementation GGMLMetalClass @end -ggml_log_callback ggml_metal_log_callback = NULL; + +static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) { + fprintf(stderr, "%s", msg); + + UNUSED(level); + UNUSED(user_data); +} + +ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback; void * ggml_metal_log_user_data = NULL; void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) { @@ -607,12 +615,24 @@ int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) { } // temporarily defined here for compatibility between ggml-backend and the old API -struct ggml_backend_metal_buffer_context { - void * data; + +struct ggml_backend_metal_buffer { + void * data; + size_t size; id metal; }; +struct ggml_backend_metal_buffer_context { + void * all_data; + size_t all_size; + bool owned; + + // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap + int n_buffers; + struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS]; +}; + // finds the Metal buffer that contains the tensor data on the GPU device // the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the // Metal buffer based on the host memory pointer @@ -622,17 +642,29 @@ static id ggml_metal_get_buffer(struct ggml_metal_context * ctx, stru const int64_t tsize = ggml_nbytes(t); + ggml_backend_buffer_t buffer = t->view_src ? 
t->view_src->buffer : t->buffer; + // compatibility with ggml-backend - if (t->buffer && t->buffer->buft == ggml_backend_metal_buffer_type()) { - struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) t->buffer->context; + if (buffer && buffer->buft == ggml_backend_metal_buffer_type()) { + struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context; - const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->data; + // find the view that contains the tensor fully + for (int i = 0; i < buf_ctx->n_buffers; ++i) { + const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data; - GGML_ASSERT(ioffs >= 0 && ioffs + tsize <= (int64_t) t->buffer->size); + //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size); + if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) { + *offs = (size_t) ioffs; - *offs = (size_t) ioffs; + //GGML_METAL_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs); - return buf_ctx->metal; + return buf_ctx->buffers[i].metal; + } + } + + GGML_METAL_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name); + + return nil; } // find the view that contains the tensor fully @@ -2361,6 +2393,7 @@ void ggml_metal_graph_compute( // backend interface +// default buffer static id g_backend_device = nil; static int g_backend_device_ref_count = 0; @@ -2388,34 +2421,31 @@ static void ggml_backend_metal_free_device(void) { static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context; - return ctx->data; + return ctx->all_data; } static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) { struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context; - [ctx->metal release]; + for (int i = 0; i < ctx->n_buffers; i++) { + [ctx->buffers[i].metal release]; + } ggml_backend_metal_free_device(); - free(ctx->data); - free(ctx); + if (ctx->owned) { + free(ctx->all_data); + } - UNUSED(buffer); + free(ctx); } static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy((char *)tensor->data + offset, data, size); UNUSED(buffer); } static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy(data, (const char *)tensor->data + offset, size); UNUSED(buffer); @@ -2433,7 +2463,13 @@ static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer UNUSED(buffer); } -static struct ggml_backend_buffer_i metal_backend_buffer_i = { +static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context; + + memset(ctx->all_data, value, ctx->all_size); +} + +static struct ggml_backend_buffer_i 
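With multiple Metal buffers per context, finding a tensor's backing buffer becomes an interval-containment test over byte offsets, as in the loop above. A self-contained sketch of that lookup (struct and function names are illustrative):

    #include <cstdint>
    #include <cstddef>

    struct span { uint8_t * data; size_t size; };

    // index of the span that fully contains [ptr, ptr + len), or -1 if none does
    static int find_containing_span(const span * spans, int n, const uint8_t * ptr, size_t len) {
        for (int i = 0; i < n; i++) {
            const int64_t ioffs = (int64_t)(intptr_t) ptr - (int64_t)(intptr_t) spans[i].data;
            if (ioffs >= 0 && (size_t) ioffs + len <= spans[i].size) {
                return i;
            }
        }
        return -1;
    }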
ggml_backend_metal_buffer_i = { /* .free_buffer = */ ggml_backend_metal_buffer_free_buffer, /* .get_base = */ ggml_backend_metal_buffer_get_base, /* .init_tensor = */ NULL, @@ -2441,8 +2477,11 @@ static struct ggml_backend_buffer_i metal_backend_buffer_i = { /* .get_tensor = */ ggml_backend_metal_buffer_get_tensor, /* .cpy_tensor_from = */ ggml_backend_metal_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_metal_buffer_cpy_tensor_to, + /* .clear = */ ggml_backend_metal_buffer_clear, }; +// default buffer type + static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context)); @@ -2453,13 +2492,46 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba size_aligned += (size_page - (size_aligned % size_page)); } - ctx->data = ggml_metal_host_malloc(size); - ctx->metal = [ggml_backend_metal_get_device() newBufferWithBytesNoCopy:ctx->data + id device = ggml_backend_metal_get_device(); + + ctx->all_data = ggml_metal_host_malloc(size_aligned); + ctx->all_size = size_aligned; + ctx->owned = true; + ctx->n_buffers = 1; + + ctx->buffers[0].data = ctx->all_data; + ctx->buffers[0].size = size; + ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; - return ggml_backend_buffer_init(buft, metal_backend_buffer_i, ctx, size); + if (ctx->buffers[0].metal == nil) { + GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); + free(ctx); + ggml_backend_metal_free_device(); + return NULL; + } + + GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0); + + +#if TARGET_OS_OSX + GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", + device.currentAllocatedSize / 1024.0 / 1024.0, + device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); + + if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) { + GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__); + } else { + GGML_METAL_LOG_INFO("\n"); + } +#else + GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0); +#endif + + + return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size); } static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { @@ -2470,7 +2542,13 @@ static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_t static bool ggml_backend_metal_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { return ggml_backend_is_metal(backend) || ggml_backend_is_cpu(backend); - GGML_UNUSED(buft); + UNUSED(buft); +} + +static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + return true; + + UNUSED(buft); } ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) { @@ -2480,6 +2558,7 @@ ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) { /* .get_alignment = */ ggml_backend_metal_buffer_type_get_alignment, /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes /* .supports_backend = */ ggml_backend_metal_buffer_type_supports_backend, + /* .is_host = */ ggml_backend_metal_buffer_type_is_host, }, /* .context = */ NULL, }; @@ -2487,6 +2566,87 @@ ggml_backend_buffer_type_t 
ggml_backend_metal_buffer_type(void) { return &ggml_backend_buffer_type_metal; } +// buffer from ptr + +ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) { + struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context)); + + ctx->all_data = data; + ctx->all_size = size; + ctx->owned = false; + ctx->n_buffers = 0; + + const size_t size_page = sysconf(_SC_PAGESIZE); + size_t size_aligned = size; + if ((size_aligned % size_page) != 0) { + size_aligned += (size_page - (size_aligned % size_page)); + } + + id device = ggml_backend_metal_get_device(); + + // the buffer fits into the max buffer size allowed by the device + if (size_aligned <= device.maxBufferLength) { + ctx->buffers[ctx->n_buffers].data = data; + ctx->buffers[ctx->n_buffers].size = size; + + ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; + + if (ctx->buffers[ctx->n_buffers].metal == nil) { + GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); + return false; + } + + GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0); + + ++ctx->n_buffers; + } else { + // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into + // one of the views + const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case + const size_t size_step = device.maxBufferLength - size_ovlp; + const size_t size_view = device.maxBufferLength; + + for (size_t i = 0; i < size; i += size_step) { + const size_t size_step_aligned = (i + size_view <= size) ? 
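The overlap arithmetic above is easier to see with concrete numbers. Under illustrative assumptions (16 KiB pages, a 96 MiB device buffer limit, largest tensor of 10 MiB):

    const size_t size_page = 16 * 1024;
    const size_t max_size  = 10 * 1024 * 1024;   // largest single tensor
    const size_t max_buf   = 96 * 1024 * 1024;   // stand-in for device.maxBufferLength

    const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page;  // 10 MiB + one page
    const size_t size_step = max_buf - size_ovlp;  // ~86 MiB between consecutive view starts
    const size_t size_view = max_buf;              // each view is as long as the device allows

Consecutive views therefore share at least size_ovlp bytes, so any tensor of up to max_size bytes lands entirely inside at least one view.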
size_view : (size_aligned - i); + + ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i); + ctx->buffers[ctx->n_buffers].size = size_step_aligned; + + ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil]; + + if (ctx->buffers[ctx->n_buffers].metal == nil) { + GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0); + return false; + } + + GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i); + if (i + size_step < size) { + GGML_METAL_LOG_INFO("\n"); + } + + ++ctx->n_buffers; + } + } + +#if TARGET_OS_OSX + GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", + device.currentAllocatedSize / 1024.0 / 1024.0, + device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); + + if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) { + GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__); + } else { + GGML_METAL_LOG_INFO("\n"); + } +#else + GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0); +#endif + + return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size); +} + +// backend + static const char * ggml_backend_metal_name(ggml_backend_t backend) { return "Metal"; @@ -2499,10 +2659,6 @@ static void ggml_backend_metal_free(ggml_backend_t backend) { free(backend); } -static void ggml_backend_metal_synchronize(ggml_backend_t backend) { - UNUSED(backend); -} - static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggml_backend_t backend) { return ggml_backend_metal_buffer_type(); @@ -2529,25 +2685,15 @@ static struct ggml_backend_i metal_backend_i = { /* .get_tensor_async = */ NULL, /* .cpy_tensor_from_async = */ NULL, /* .cpy_tensor_to_async = */ NULL, - /* .synchronize = */ ggml_backend_metal_synchronize, - /* .graph_plan_create = */ NULL, // the metal implementation does not require creating graph plans atm + /* .synchronize = */ NULL, + /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_metal_graph_compute, /* .supports_op = */ ggml_backend_metal_supports_op, }; -// TODO: make a common log callback for all backends in ggml-backend -static void ggml_backend_log_callback(enum ggml_log_level level, const char * msg, void * user_data) { - fprintf(stderr, "%s", msg); - - UNUSED(level); - UNUSED(user_data); -} - ggml_backend_t ggml_backend_metal_init(void) { - ggml_metal_log_set_callback(ggml_backend_log_callback, NULL); - struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS); if (ctx == NULL) { diff --git a/ggml.c b/ggml.c index 6da65bd92..236148514 100644 --- a/ggml.c +++ b/ggml.c @@ -2383,20 +2383,8 @@ size_t ggml_get_mem_size(const struct ggml_context * ctx) { size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) { size_t max_size = 0; - struct ggml_object * obj = ctx->objects_begin; - - while (obj != NULL) { - if (obj->type == GGML_OBJECT_TENSOR) { - struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs); - - const size_t size = ggml_nbytes(tensor); - - if (max_size < size) { - max_size = size; - } - } - - obj = obj->next; + for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != 
NULL; tensor = ggml_get_next_tensor(ctx, tensor)) { + max_size = MAX(max_size, ggml_nbytes(tensor)); } return max_size; @@ -3093,7 +3081,7 @@ struct ggml_tensor * ggml_view_tensor( return result; } -struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx) { +struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) { struct ggml_object * obj = ctx->objects_begin; char * const mem_buffer = ctx->mem_buffer; @@ -3109,7 +3097,7 @@ struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx) { return NULL; } -struct ggml_tensor * ggml_get_next_tensor(struct ggml_context * ctx, struct ggml_tensor * tensor) { +struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struct ggml_tensor * tensor) { struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE); obj = obj->next; @@ -19213,6 +19201,10 @@ char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) { return ctx->infos[i].name.data; } +enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) { + return ctx->infos[i].type; +} + // returns the index static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) { const int idx = gguf_find_key(ctx, key); diff --git a/ggml.h b/ggml.h index beacdc8be..b17314897 100644 --- a/ggml.h +++ b/ggml.h @@ -735,8 +735,8 @@ extern "C" { GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src); // Context tensor enumeration and lookup - GGML_API struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx); - GGML_API struct ggml_tensor * ggml_get_next_tensor (struct ggml_context * ctx, struct ggml_tensor * tensor); + GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx); + GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor); GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); @@ -2135,10 +2135,11 @@ extern "C" { GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id); GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i); - GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx); - GGML_API int gguf_find_tensor (const struct gguf_context * ctx, const char * name); - GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i); - GGML_API char * gguf_get_tensor_name (const struct gguf_context * ctx, int i); + GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx); + GGML_API int gguf_find_tensor (const struct gguf_context * ctx, const char * name); + GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i); + GGML_API char * gguf_get_tensor_name (const struct gguf_context * ctx, int i); + GGML_API enum ggml_type gguf_get_tensor_type (const struct gguf_context * ctx, int i); // overrides existing values or adds a new one GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); diff --git a/llama.cpp b/llama.cpp index 63ebe581b..ba970ce8d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1,11 +1,12 @@ #define LLAMA_API_INTERNAL +//#define LLAMA_GGML_BACKEND_CUDA_TEST // for testing only - enables ggml-cuda through ggml-backend, disables partial offloading #include "llama.h" #include "unicode.h" #include "ggml.h" - #include "ggml-alloc.h" +#include "ggml-backend.h" #ifdef 
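The new gguf_get_tensor_type accessor reads the tensor type straight from the GGUF metadata, with no need to materialize a ggml tensor first; the model loader changes further down use it exactly this way. A small usage sketch (assumes ggml.h is included and ctx_gguf is an initialized gguf_context):

    for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
        const enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);
        printf("%-32s %s\n", gguf_get_tensor_name(ctx_gguf, i), ggml_type_name(type));
    }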
GGML_USE_CUBLAS # include "ggml-cuda.h" @@ -32,6 +33,7 @@ #include #if defined(_POSIX_MAPPED_FILES) #include + #include #endif #if defined(_POSIX_MEMLOCK_RANGE) #include @@ -712,38 +714,6 @@ static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * // llama helpers // -inline void * llama_host_malloc(size_t n) { -#ifdef GGML_USE_CUBLAS - if (ggml_cublas_loaded()) { - return ggml_cuda_host_malloc(n); - } else { - return malloc(n); - } -#elif GGML_USE_METAL - return ggml_metal_host_malloc(n); -#elif GGML_USE_CPU_HBM - return hbw_malloc(n); -#else - return malloc(n); -#endif -} - -inline void llama_host_free(void * ptr) { -#ifdef GGML_USE_CUBLAS - if (ggml_cublas_loaded()) { - return ggml_cuda_host_free(ptr); - } else { - return free(ptr); - } -#elif GGML_USE_METAL - return ggml_metal_host_free(ptr); -#elif GGML_USE_CPU_HBM - return hbw_free(ptr); -#else - return free(ptr); -#endif -} - #if defined(_WIN32) static std::string llama_format_win_err(DWORD err) { LPSTR buf; @@ -758,40 +728,10 @@ static std::string llama_format_win_err(DWORD err) { } #endif -struct llama_buffer { - void * data = NULL; - size_t size = 0; - - // fallback to malloc / free - // useful in cases where CUDA can try to allocate PINNED memory - bool fallback = false; - - void resize(size_t n) { - llama_host_free(data); - - data = llama_host_malloc(n); - if (!data) { - fallback = true; - data = malloc(n); - } else { - fallback = false; - } - - GGML_ASSERT(data); - size = n; - } - - ~llama_buffer() { - if (data) { - if (fallback) { // NOLINT - free(data); - } else { - llama_host_free(data); - } - } - - data = NULL; - } +template +struct no_init { + T value; + no_init() { /* do nothing */ } }; struct llama_file { @@ -879,6 +819,9 @@ struct llama_mmap { #ifdef _POSIX_MAPPED_FILES static constexpr bool SUPPORTED = true; + // list of mapped fragments (first_offset, last_offset) + std::vector> mapped_fragments; + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) { size = file->size; int fd = fileno(file->fp); @@ -886,17 +829,22 @@ struct llama_mmap { // prefetch/readahead impairs performance on NUMA systems if (numa) { prefetch = 0; } #ifdef __linux__ + // advise the kernel to read the file sequentially (increases readahead) + if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { + LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n", + strerror(errno)); + } if (prefetch) { flags |= MAP_POPULATE; } #endif addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); - if (addr == MAP_FAILED) { + if (addr == MAP_FAILED) { // NOLINT throw std::runtime_error(format("mmap failed: %s", strerror(errno))); } if (prefetch > 0) { - // Advise the kernel to preload the mapped memory + // advise the kernel to preload the mapped memory if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) { - fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n", + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n", strerror(errno)); } } @@ -904,14 +852,81 @@ struct llama_mmap { // advise the kernel not to use readahead // (because the next page might not belong on the same node) if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) { - fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n", + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n", strerror(errno)); } } + + // initialize list of mapped_fragments + mapped_fragments.emplace_back(0, 
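Both hints in the mmap constructor above are advisory: a failed posix_fadvise or posix_madvise is logged and ignored, and loading still works. A standalone POSIX sketch of the same prefetch setup (hypothetical function name, simplified error handling):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    static void * map_file_sequential(int fd, size_t size) {
        if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {  // increase kernel readahead
            fprintf(stderr, "posix_fadvise failed: %s\n", strerror(errno));
        }
        void * addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
        if (addr != MAP_FAILED) {
            posix_madvise(addr, size, POSIX_MADV_WILLNEED);    // ask the kernel to page in eagerly
        }
        return addr;
    }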
file->size); + } + + static void align_range(size_t * first, size_t * last, size_t page_size) { + // align first to the next page + size_t offset_in_page = *first & (page_size - 1); + size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page; + *first += offset_to_page; + + // align last to the previous page + *last = *last & ~(page_size - 1); + + if (*last <= *first) { + *last = *first; + } + } + + // partially unmap the file in the range [first, last) + void unmap_fragment(size_t first, size_t last) { + // note: this function must not be called multiple times with overlapping ranges + // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings + int page_size = sysconf(_SC_PAGESIZE); + align_range(&first, &last, page_size); + size_t len = last - first; + + if (len == 0) { + return; + } + + GGML_ASSERT(first % page_size == 0); + GGML_ASSERT(last % page_size == 0); + GGML_ASSERT(last > first); + + void * next_page_start = (uint8_t *) addr + first; + + // unmap the range + if (munmap(next_page_start, len)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + + // update the list of mapped fragments to avoid unmapping the same range again in the destructor + std::vector> new_mapped_fragments; + for (const auto & frag : mapped_fragments) { + if (frag.first < first && frag.second > last) { + // the range is in the middle of the fragment, split it + new_mapped_fragments.emplace_back(frag.first, first); + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first < first && frag.second > first) { + // the range starts in the middle of the fragment + new_mapped_fragments.emplace_back(frag.first, first); + } else if (frag.first < last && frag.second > last) { + // the range ends in the middle of the fragment + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first >= first && frag.second <= last) { + // the range covers the entire fragment + } else { + // the range is outside the fragment + new_mapped_fragments.push_back(frag); + } + } + mapped_fragments = std::move(new_mapped_fragments); } ~llama_mmap() { - munmap(addr, size); + for (const auto & frag : mapped_fragments) { + if (munmap((char *) addr + frag.first, frag.second - frag.first)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + } } #elif defined(_WIN32) static constexpr bool SUPPORTED = true; @@ -959,6 +974,12 @@ struct llama_mmap { } } + void unmap_fragment(size_t first, size_t last) { + // not supported + GGML_UNUSED(first); + GGML_UNUSED(last); + } + ~llama_mmap() { if (!UnmapViewOfFile(addr)) { fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", @@ -975,6 +996,13 @@ struct llama_mmap { throw std::runtime_error(std::string("mmap not supported")); } + + void unmap(size_t offset, size_t len) { + (void) offset; + (void) len; + + throw std::runtime_error(std::string("mmap not supported")); + } #endif }; @@ -1148,6 +1176,26 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_ return std::string(result.data(), result.size()); } +static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) { +#ifdef GGML_USE_METAL + if (n_gpu_layers > 0) { + return ggml_backend_metal_buffer_type(); + } +#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) + if (n_gpu_layers > 0) { + return ggml_backend_cuda_buffer_type(0); + } +#elif defined(GGML_USE_CUBLAS) + return ggml_backend_cuda_host_buffer_type(); +#elif defined(GGML_USE_CPU_HBM) + 
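align_range shrinks the requested range inward so that only whole pages are ever passed to munmap. A worked example with 4 KiB pages (illustrative values):

    size_t first = 100;   // not page-aligned: rounded up to 4096
    size_t last  = 8292;  // not page-aligned: rounded down to 8192
    align_range(&first, &last, 4096);
    // first == 4096, last == 8192 -> exactly one whole page is unmapped

If the aligned range collapses (last <= first), unmap_fragment returns early, which is how partially covered pages at either end of a fragment stay mapped.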
return ggml_backend_cpu_hbm_buffer_type(); +#endif + + return ggml_backend_cpu_buffer_type(); + + GGML_UNUSED(n_gpu_layers); +} + // // globals // @@ -1348,14 +1396,10 @@ struct llama_kv_cache { struct ggml_context * ctx = NULL; - llama_buffer buf; + ggml_backend_buffer_t buf = NULL; ~llama_kv_cache() { - if (ctx) { - ggml_free(ctx); - } - -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (ggml_cublas_loaded()) { for (size_t i = 0; i < k_l.size(); ++i) { ggml_cuda_free_data(k_l[i]); @@ -1363,6 +1407,11 @@ struct llama_kv_cache { } } #endif + if (ctx) { + ggml_free(ctx); + } + + ggml_backend_buffer_free(buf); } }; @@ -1402,11 +1451,11 @@ struct llama_vocab { id special_suffix_id = 32008; id special_eot_id = 32010; - int find_bpe_rank(std::string token_left, std::string token_right) const { - GGML_ASSERT(token_left.find(" ") == std::string::npos); - GGML_ASSERT(token_left.find("\n") == std::string::npos); - GGML_ASSERT(token_right.find(" ") == std::string::npos); - GGML_ASSERT(token_right.find("\n") == std::string::npos); + int find_bpe_rank(const std::string & token_left, const std::string & token_right) const { + GGML_ASSERT(token_left.find(' ') == std::string::npos); + GGML_ASSERT(token_left.find('\n') == std::string::npos); + GGML_ASSERT(token_right.find(' ') == std::string::npos); + GGML_ASSERT(token_right.find('\n') == std::string::npos); auto it = bpe_ranks.find(std::make_pair(token_left, token_right)); if (it == bpe_ranks.end()) { @@ -1448,7 +1497,7 @@ struct llama_model { struct ggml_context * ctx = NULL; // the model memory buffer - llama_buffer buf; + ggml_backend_buffer_t buf = NULL; // model memory mapped file std::unique_ptr mapping; @@ -1464,11 +1513,7 @@ struct llama_model { int64_t t_start_us = 0; ~llama_model() { - if (ctx) { - ggml_free(ctx); - } - -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (ggml_cublas_loaded()) { for (size_t i = 0; i < tensors_by_name.size(); ++i) { ggml_cuda_free_data(tensors_by_name[i].second); @@ -1482,24 +1527,26 @@ struct llama_model { ggml_cl_free_data(tensors_by_name[i].second); } #endif + if (ctx) { + ggml_free(ctx); + } + + ggml_backend_buffer_free(buf); } }; struct llama_context { llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {} ~llama_context() { -#ifdef GGML_USE_METAL - if (ctx_metal) { - ggml_metal_free(ctx_metal); - } -#endif - if (alloc) { - ggml_allocr_free(alloc); - } + ggml_allocr_free(alloc); + ggml_backend_buffer_free(buf_alloc); + ggml_backend_free(backend); } llama_cparams cparams; + ggml_backend_t backend = nullptr; + const llama_model & model; // key + value cache for the self attention @@ -1530,18 +1577,13 @@ struct llama_context { // input embedding (1-dimensional array: [n_embd]) std::vector embedding; - // reusable buffer for `struct ggml_graph_plan.work_data` - std::vector work_buffer; - // memory buffers used to evaluate the model - llama_buffer buf_compute; - - llama_buffer buf_alloc; + std::vector buf_compute_meta; + ggml_backend_buffer_t buf_alloc = NULL; ggml_allocr * alloc = NULL; -#ifdef GGML_USE_METAL - ggml_metal_context * ctx_metal = NULL; -#endif + // temporary buffer for copying data to/from the backend + std::vector> buf_copy; #ifdef GGML_USE_MPI ggml_mpi_context * ctx_mpi = NULL; @@ -1563,9 +1605,6 @@ static bool llama_kv_cache_init( const uint32_t n_embd = hparams.n_embd_gqa(); const uint32_t n_layer = hparams.n_layer; - const int64_t n_mem 
= n_layer*n_ctx; - const int64_t n_elements = n_embd*n_mem; - cache.has_shift = false; cache.head = 0; @@ -1575,13 +1614,10 @@ static bool llama_kv_cache_init( cache.cells.clear(); cache.cells.resize(n_ctx); - cache.buf.resize(ggml_row_size(ktype, n_elements) + ggml_row_size(vtype, n_elements) + 2u*n_layer*ggml_tensor_overhead()); - memset(cache.buf.data, 0, cache.buf.size); - struct ggml_init_params params; - params.mem_size = cache.buf.size; - params.mem_buffer = cache.buf.data; - params.no_alloc = false; + params.mem_size = 2u*n_layer*ggml_tensor_overhead(); + params.mem_buffer = NULL; + params.no_alloc = true; cache.ctx = ggml_init(params); @@ -1595,9 +1631,7 @@ static bool llama_kv_cache_init( cache.k_l.reserve(n_layer); cache.v_l.reserve(n_layer); - const int i_gpu_start = (int) n_layer - n_gpu_layers; GGML_UNUSED(i_gpu_start); - - GGML_UNUSED(offload); + const int i_gpu_start = (int) n_layer - n_gpu_layers; for (int i = 0; i < (int) n_layer; i++) { ggml_tensor * k = ggml_new_tensor_1d(cache.ctx, ktype, n_embd*n_ctx); @@ -1606,23 +1640,35 @@ static bool llama_kv_cache_init( ggml_format_name(v, "cache_v_l%d", i); cache.k_l.push_back(k); cache.v_l.push_back(v); -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (i >= i_gpu_start) { if (offload) { ggml_cuda_assign_buffers_no_scratch(k); - vram_kv_cache += ggml_nbytes(k); ggml_cuda_assign_buffers_no_scratch(v); + vram_kv_cache += ggml_nbytes(k); vram_kv_cache += ggml_nbytes(v); + // HACK: mark tensor as allocated + k->data = v->data = (void *)(uintptr_t)1; } } #endif // GGML_USE_CUBLAS } + // allocate tensors + cache.buf = ggml_backend_alloc_ctx_tensors_from_buft(cache.ctx, llama_default_buffer_type(n_gpu_layers)); + + // buf may be NULL with full offload + if (cache.buf) { + // initialize the buffer to avoid NaNs in the padding + ggml_backend_buffer_clear(cache.buf, 0); + } + if (vram_kv_cache > 0) { LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0); } - GGML_UNUSED(n_gpu_layers); + GGML_UNUSED(i_gpu_start); + GGML_UNUSED(offload); return true; } @@ -2073,14 +2119,13 @@ struct llama_model_loader { enum ggml_type type_max = GGML_TYPE_F32; for (int i = 0; i < n_tensors; i++) { - const char * name = gguf_get_tensor_name(ctx_gguf, i); - struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name); + enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i); - n_type[meta->type]++; + n_type[type]++; - if (n_type_max < n_type[meta->type]) { - n_type_max = n_type[meta->type]; - type_max = meta->type; + if (n_type_max < n_type[type]) { + n_type_max = n_type[type]; + type_max = type; } // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); @@ -2221,34 +2266,19 @@ struct llama_model_loader { return gguf_get_tensor_name(ctx_gguf, i); } - struct ggml_tensor * get_tensor_meta(int i) const { - return ggml_get_tensor(ctx_meta, get_tensor_name(i)); + struct ggml_tensor * get_tensor_meta(const char * name) const { + return ggml_get_tensor(ctx_meta, name); } - void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const { - ctx_size_p = 0; - mmapped_size_p = 0; - - for (int i = 0; i < n_tensors; i++) { - struct ggml_tensor * meta = get_tensor_meta(i); - ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; - (use_mmap ? 
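The KV cache now follows the standard ggml-backend recipe: a metadata-only context (no_alloc), tensor creation, then one backend buffer holding all the data. In outline, reusing the names from the hunk above:

    struct ggml_init_params params = {
        /*.mem_size   =*/ 2u*n_layer*ggml_tensor_overhead(),  // metadata only, no tensor data
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);

    // ... create the per-layer K and V tensors with ggml_new_tensor_1d(ctx, ...) ...

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, llama_default_buffer_type(n_gpu_layers));
    if (buf) {
        ggml_backend_buffer_clear(buf, 0);  // zero the buffer so padding never holds NaNs
    }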
mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta); - } + struct ggml_tensor * get_tensor_meta(int i) const { + return get_tensor_meta(get_tensor_name(i)); } struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) { - if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ctx, true); - } - struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta); tensor->backend = backend; // TODO: ggml_set_backend ggml_set_name(tensor, ggml_get_name(meta)); - if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ctx, use_mmap); - } - n_created++; return tensor; @@ -2306,90 +2336,137 @@ struct llama_model_loader { return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx); } + void init_mapping(bool prefetch = true) { + /* + // prefetch only CPU tensors + if (use_mmap) { + size_t size_pref = 0; // prefetch + + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + if (cur->backend == GGML_BACKEND_CPU) { + size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur); + size_pref = std::max(size_pref, tensor_end); + } + } + mapping.reset(new llama_mmap(&file, gguf_get_data_offset(ctx_gguf) + size_pref, ggml_is_numa())); + } + */ + // prefetch the whole file - all the data is needed anyway + if (use_mmap) { + mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa())); + } + } + + // for backwards compatibility, does not support ggml-backend void load_data_for(struct ggml_tensor * cur) const { const size_t offs = file_offset(ggml_get_name(cur)); - if (use_mmap) { - cur->data = (uint8_t *) mapping->addr + offs; + if (use_mmap && mapping) { + GGML_ASSERT(cur->data == nullptr); + cur->data = (uint8_t *)mapping->addr + offs; } else { + GGML_ASSERT(cur->data != nullptr); file.seek(offs, SEEK_SET); file.read_raw(cur->data, ggml_nbytes(cur)); } } - void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { + void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const { size_t size_data = 0; - size_t size_lock = 0; - size_t size_pref = 0; // prefetch for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); size_data += ggml_nbytes(cur); - if (cur->backend == GGML_BACKEND_CPU) { - size_pref += ggml_nbytes(cur); - } } - if (use_mmap) { - mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa())); + if (use_mmap && buf_mmap) { if (lmlock) { lmlock->init(mapping->addr); } } - size_t done_size = 0; +#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST) + const bool legacy_offload = true; +#else + const bool legacy_offload = false; +#endif + + std::vector> read_buf; + + size_t size_done = 0; + + size_t mmap_first = -1; + size_t mmap_last = 0; + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); GGML_ASSERT(cur); // unused tensors should have been caught by load_data already if (progress_callback) { - progress_callback((float) done_size / size_data, progress_callback_user_data); + progress_callback((float) size_done / size_data, progress_callback_user_data); } - // allocate temp buffer if 
not using mmap - if (!use_mmap && cur->data == NULL) { - GGML_ASSERT(cur->backend != GGML_BACKEND_CPU); - #ifdef GGML_USE_CPU_HBM - cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur)); - #else - cur->data = (uint8_t*)malloc(ggml_nbytes(cur)); - #endif - } + const size_t offs = file_offset(ggml_get_name(cur)); - load_data_for(cur); - - switch (cur->backend) { - case GGML_BACKEND_CPU: - if (use_mmap && lmlock) { - size_lock += ggml_nbytes(cur); - lmlock->grow_to(size_lock); + if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) { + if (use_mmap && mapping) { + if (buf_mmap) { + ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs); + if (lmlock) { + lmlock->grow_to(offs + ggml_nbytes(cur)); + } + mmap_first = std::min(mmap_first, offs); + mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur)); + } else { + ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur)); } - break; -#ifdef GGML_USE_CUBLAS - case GGML_BACKEND_GPU: - case GGML_BACKEND_GPU_SPLIT: - // old code: - //ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); - - // TODO: test if this works !! - ggml_cuda_transform_tensor(cur->data, cur); - if (!use_mmap) { - free(cur->data); + } else { + if (ggml_backend_buffer_is_host(cur->buffer)) { + file.seek(offs, SEEK_SET); + file.read_raw(cur->data, ggml_nbytes(cur)); + } else { + read_buf.resize(ggml_nbytes(cur)); + file.seek(offs, SEEK_SET); + file.read_raw(read_buf.data(), ggml_nbytes(cur)); + ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur)); } - break; + } + } else { + // HACK: mark tensor as allocated + cur->data = (void *)(uintptr_t)1; + void * data; + if (use_mmap && mapping) { + data = (uint8_t *) mapping->addr + offs; + } else { + read_buf.resize(ggml_nbytes(cur)); + file.seek(offs, SEEK_SET); + file.read_raw(read_buf.data(), ggml_nbytes(cur)); + data = read_buf.data(); + } + +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) + ggml_cuda_transform_tensor(data, cur); #elif defined(GGML_USE_CLBLAST) - case GGML_BACKEND_GPU: - ggml_cl_transform_tensor(cur->data, cur); - if (!use_mmap) { - free(cur->data); - } - break; + GGML_ASSERT(cur->backend == GGML_BACKEND_GPU); + ggml_cl_transform_tensor(data, cur); +#else + GGML_ASSERT(!"GPU tensor without a GPU backend"); + GGML_UNUSED(data); #endif - default: - continue; } - done_size += ggml_nbytes(cur); + size_done += ggml_nbytes(cur); + } + + // unmap offloaded tensors and metadata + if (use_mmap && mapping) { + mapping->unmap_fragment(0, mmap_first); + mapping->unmap_fragment(mmap_last, mapping->size); + } + + if (progress_callback) { + progress_callback(1.0f, progress_callback_user_data); } } }; @@ -2983,25 +3060,16 @@ static void llm_load_tensors( model.n_gpu_layers = n_gpu_layers; - size_t ctx_size; - size_t mmapped_size; + size_t ctx_size = ggml_tensor_overhead() * ml.n_tensors; - ml.calc_sizes(ctx_size, mmapped_size); - - LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0); + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0); // create the ggml context { - model.buf.resize(ctx_size); - if (use_mlock) { - model.mlock_buf.init (model.buf.data); - model.mlock_buf.grow_to(model.buf.size); - } - struct ggml_init_params params = { - /*.mem_size =*/ model.buf.size, - /*.mem_buffer =*/ model.buf.data, - /*.no_alloc =*/ ml.use_mmap, + /*.mem_size =*/ ctx_size, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, }; model.ctx = ggml_init(params); @@ -3015,22 +3083,21 @@ static void 
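Note how the rewritten loader tracks the byte window of the file that still backs CPU tensors and releases everything outside it once loading completes. The bookkeeping reduces to (sketch, same variable names as above):

    size_t mmap_first = (size_t) -1;  // SIZE_MAX until the first mmap-backed tensor is seen
    size_t mmap_last  = 0;

    // for each tensor placed into the mmap-backed buffer:
    //     mmap_first = std::min(mmap_first, offs);
    //     mmap_last  = std::max(mmap_last,  offs + ggml_nbytes(cur));

    mapping->unmap_fragment(0, mmap_first);             // bytes before the first CPU tensor
    mapping->unmap_fragment(mmap_last, mapping->size);  // bytes after the last CPU tensor

Offloaded tensor data and the GGUF metadata at the head of the file therefore drop out of the process's resident set after loading.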
llm_load_tensors( enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU; enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU; -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (ggml_cublas_loaded()) { LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__); ggml_cuda_set_main_device(main_gpu); - llama_backend_offload = GGML_BACKEND_GPU; + llama_backend_offload = GGML_BACKEND_GPU; llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT; } #elif defined(GGML_USE_CLBLAST) LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__); - llama_backend_offload = GGML_BACKEND_GPU; + llama_backend_offload = GGML_BACKEND_GPU; llama_backend_offload_split = GGML_BACKEND_GPU; #endif - // prepare memory for the weights - size_t vram_weights = 0; + // create tensors for the weights { const int64_t n_embd = hparams.n_embd; const int64_t n_embd_gqa = hparams.n_embd_gqa(); @@ -3059,13 +3126,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3115,28 +3175,6 @@ static void llm_load_tensors( layer.ffn_up_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}, backend_split); } } - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + - (layer.bq ? ggml_nbytes(layer.bq) : 0) + - (layer.bk ? ggml_nbytes(layer.bk) : 0) + - (layer.bv ? ggml_nbytes(layer.bv) : 0) + - (layer.bo ? 
ggml_nbytes(layer.bo) : 0) + - ggml_nbytes(layer.ffn_norm); - - if (layer.ffn_gate_inp == nullptr) { - vram_weights += - ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); - } else { - vram_weights += ggml_nbytes(layer.ffn_gate_inp); - for (uint32_t x = 0; x < hparams.n_expert; ++x) { - vram_weights += - ggml_nbytes(layer.ffn_gate_exp[x]) + ggml_nbytes(layer.ffn_down_exp[x]) + ggml_nbytes(layer.ffn_up_exp[x]); - } - } - } } } break; case LLM_ARCH_BAICHUAN: @@ -3156,13 +3194,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3189,19 +3220,10 @@ static void llm_load_tensors( layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); - } } } break; case LLM_ARCH_FALCON: { - // TODO: CPU-only for now - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); // output @@ -3220,14 +3242,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - vram_weights += ggml_nbytes(model.output_norm_b); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3248,11 +3262,6 @@ static void llm_load_tensors( if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) { layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend); layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(layer.attn_norm_2); - vram_weights += ggml_nbytes(layer.attn_norm_2_b); - } } layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); @@ -3260,13 +3269,6 @@ static void llm_load_tensors( layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + - 
ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) + - ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); - } } } break; case LLM_ARCH_STARCODER: @@ -3290,14 +3292,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - vram_weights += ggml_nbytes(model.output_norm_b); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3329,16 +3323,6 @@ static void llm_load_tensors( layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + - ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + - ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + - ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) + - ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b) + - ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b); - } } } break; case LLM_ARCH_PERSIMMON: @@ -3360,14 +3344,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - vram_weights += ggml_nbytes(model.output_norm_b); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3397,8 +3373,6 @@ static void llm_load_tensors( } break; case LLM_ARCH_BLOOM: { - // TODO: CPU-only for now - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU); model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU); @@ -3419,14 +3393,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - vram_weights += ggml_nbytes(model.output_norm_b); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3458,16 +3424,6 @@ static void llm_load_tensors( layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); - - if (backend == GGML_BACKEND_GPU) 
{ - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + - ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + - ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + - ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) + - ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) + - ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b); - } } } break; case LLM_ARCH_MPT: @@ -3489,13 +3445,6 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3518,16 +3467,6 @@ static void llm_load_tensors( layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + - ggml_nbytes(layer.wqkv) + - ggml_nbytes(layer.wo) + - ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.ffn_down) + - ggml_nbytes(layer.ffn_up); - } } } break; case LLM_ARCH_STABLELM: @@ -3550,13 +3489,6 @@ static void llm_load_tensors( model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } } const uint32_t n_ff = hparams.n_ff; @@ -3588,13 +3520,6 @@ static void llm_load_tensors( layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); - } } } break; case LLM_ARCH_QWEN: @@ -3614,14 +3539,7 @@ static void llm_load_tensors( model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - } - if (backend_output == GGML_BACKEND_GPU_SPLIT) { - vram_weights += ggml_nbytes(model.output); - } - } + } const uint32_t n_ff = hparams.n_ff / 2; @@ -3646,13 +3564,6 @@ static void llm_load_tensors( layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 
backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + - ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_gate) + - ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); - } } } break; case LLM_ARCH_PHI2: @@ -3676,13 +3587,6 @@ static void llm_load_tensors( model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output); - - if (backend_norm == GGML_BACKEND_GPU) { - vram_weights += ggml_nbytes(model.output_norm); - vram_weights += ggml_nbytes(model.output_norm_b); - vram_weights += ggml_nbytes(model.output); - vram_weights += ggml_nbytes(model.output_b); - } } const uint32_t n_ff = hparams.n_ff; @@ -3711,15 +3615,6 @@ static void llm_load_tensors( layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); - - if (backend == GGML_BACKEND_GPU) { - vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + - ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + - ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + - ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) + - ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b); - } } } break; default: @@ -3729,16 +3624,78 @@ static void llm_load_tensors( ml.done_getting_tensors(); + ml.init_mapping(); + + // allocate tensors + size_t vram_weights = 0; + size_t buf_size = 0; + + ggml_backend_buffer_type_t buft = llama_default_buffer_type(n_gpu_layers); + + for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { + // GGML_BACKEND_GPU tensors are for CUDA and OpenCL only, which are handled separately without ggml-backend + if (t->backend == GGML_BACKEND_CPU) { + buf_size += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), ggml_backend_buft_get_alignment(buft)); + } else { + vram_weights += ggml_nbytes(t); + } + } + + // create backend buffer + ggml_backend_buffer_t buf_mmap = nullptr; + +#ifdef GGML_USE_METAL + if (n_gpu_layers > 0) { + if (ml.use_mmap) { + const size_t max_size = ggml_get_max_tensor_size(ctx); + model.buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size); + buf_mmap = model.buf; + } else { + model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_metal_buffer_type()); + } + } +#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) + // for testing only + if (n_gpu_layers > 0) { + model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cuda_buffer_type(0)); + } +#endif + + if (model.buf == nullptr) { + // CPU backend, and indirectly CUDA and OpenCL + if (ml.use_mmap) { + model.buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size); + buf_mmap = model.buf; + } else { + // allocate only CPU tensors + model.buf = ggml_backend_buft_alloc_buffer(buft, buf_size); + ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(model.buf); + for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { + if (t->backend 
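The buf_size accounting above rounds each CPU tensor up to the buffer type's alignment before summing, so a single allocation is guaranteed to fit all tensors once they are placed at aligned offsets. Worked numbers with a 32-byte alignment (illustrative sizes):

    // GGML_PAD(x, n) rounds x up to the next multiple of n
    size_t buf_size = 0;
    buf_size += GGML_PAD(100, 32);  // 128
    buf_size += GGML_PAD( 64, 32);  //  64
    buf_size += GGML_PAD(130, 32);  // 160
    // buf_size == 352, versus 294 bytes of raw tensor data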
== GGML_BACKEND_CPU) { + ggml_tallocr_alloc(alloc, t); + } + } + ggml_tallocr_free(alloc); + } + } + + if (use_mlock && ggml_backend_buffer_is_host(model.buf)) { + model.mlock_buf.init (ggml_backend_buffer_get_base(model.buf)); + model.mlock_buf.grow_to(ggml_backend_buffer_get_size(model.buf)); + } + // print memory requirements { - // this is the total memory required to run the inference - size_t mem_required = - ctx_size + - mmapped_size - vram_weights; // weights in VRAM not in memory + size_t sys_mem_required = ctx_size + buf_size; - LLAMA_LOG_INFO("%s: mem required = %7.2f MiB\n", __func__, mem_required / 1024.0 / 1024.0); + if (sys_mem_required > 0) { + LLAMA_LOG_INFO("%s: system memory used = %7.2f MiB\n", __func__, sys_mem_required / 1024.0 / 1024.0); + } + if (vram_weights > 0) { + LLAMA_LOG_INFO("%s: VRAM used = %7.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0); + } -#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) +#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST) const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer)); LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu); @@ -3746,39 +3703,26 @@ static void llm_load_tensors( LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__); } -#ifdef GGML_USE_CUBLAS const int max_backend_supported_layers = hparams.n_layer + 1; const int max_offloadable_layers = hparams.n_layer + 1; -#elif GGML_USE_CLBLAST - const int max_backend_supported_layers = hparams.n_layer + 1; - const int max_offloadable_layers = hparams.n_layer + 1; -#endif // GGML_USE_CUBLAS LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); - LLAMA_LOG_INFO("%s: VRAM used: %.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0); -#else - (void) n_gpu_layers; #endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) } - // populate `tensors_by_name` +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) + ggml_cuda_set_tensor_split(tensor_split); +#else + GGML_UNUSED(tensor_split); +#endif // GGML_USE_CUBLAS + + // populate tensors_by_name for (int i = 0; i < ml.n_tensors; ++i) { struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i)); model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); } - (void) tensor_split; -#ifdef GGML_USE_CUBLAS - { - ggml_cuda_set_tensor_split(tensor_split); - } -#endif - - ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL); - - if (progress_callback) { - progress_callback(1.0f, progress_callback_user_data); - } + ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? 
&model.mlock_mmap : NULL); model.mapping = std::move(ml.mapping); @@ -4211,7 +4155,7 @@ struct llm_build_context { const llm_build_cb & cb; - llama_buffer & buf_compute; + std::vector & buf_compute_meta; struct ggml_context * ctx0 = nullptr; @@ -4221,35 +4165,35 @@ struct llm_build_context { const llama_batch & batch, const llm_build_cb & cb, bool worst_case) : - model (lctx.model), - hparams (model.hparams), - cparams (lctx.cparams), - batch (batch), - kv_self (lctx.kv_self), - n_embd (hparams.n_embd), - n_layer (hparams.n_layer), - n_ctx (cparams.n_ctx), - n_head (hparams.n_head), - n_head_kv (hparams.n_head_kv), - n_embd_head (hparams.n_embd_head()), - n_embd_gqa (hparams.n_embd_gqa()), - n_expert (hparams.n_expert), - n_expert_used (hparams.n_expert_used), - freq_base (cparams.rope_freq_base), - freq_scale (cparams.rope_freq_scale), - ext_factor (cparams.yarn_ext_factor), - attn_factor (cparams.yarn_attn_factor), - beta_fast (cparams.yarn_beta_fast), - beta_slow (cparams.yarn_beta_slow), - norm_eps (hparams.f_norm_eps), - norm_rms_eps (hparams.f_norm_rms_eps), - n_tokens (batch.n_tokens), - n_kv (worst_case ? n_ctx : kv_self.n), - kv_head (worst_case ? n_ctx - n_tokens : kv_self.head), - n_orig_ctx (cparams.n_yarn_orig_ctx), - do_rope_shift (worst_case || kv_self.has_shift), - cb (cb), - buf_compute (lctx.buf_compute) { + model (lctx.model), + hparams (model.hparams), + cparams (lctx.cparams), + batch (batch), + kv_self (lctx.kv_self), + n_embd (hparams.n_embd), + n_layer (hparams.n_layer), + n_ctx (cparams.n_ctx), + n_head (hparams.n_head), + n_head_kv (hparams.n_head_kv), + n_embd_head (hparams.n_embd_head()), + n_embd_gqa (hparams.n_embd_gqa()), + n_expert (hparams.n_expert), + n_expert_used (hparams.n_expert_used), + freq_base (cparams.rope_freq_base), + freq_scale (cparams.rope_freq_scale), + ext_factor (cparams.yarn_ext_factor), + attn_factor (cparams.yarn_attn_factor), + beta_fast (cparams.yarn_beta_fast), + beta_slow (cparams.yarn_beta_slow), + norm_eps (hparams.f_norm_eps), + norm_rms_eps (hparams.f_norm_rms_eps), + n_tokens (batch.n_tokens), + n_kv (worst_case ? n_ctx : kv_self.n), + kv_head (worst_case ? n_ctx - n_tokens : kv_self.head), + n_orig_ctx (cparams.n_yarn_orig_ctx), + do_rope_shift (worst_case || kv_self.has_shift), + cb (cb), + buf_compute_meta (lctx.buf_compute_meta) { GGML_ASSERT(!!kv_self.ctx); // all initializations should be done in init() @@ -4257,8 +4201,8 @@ struct llm_build_context { void init() { struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, + /*.mem_size =*/ buf_compute_meta.size(), + /*.mem_buffer =*/ buf_compute_meta.data(), /*.no_alloc =*/ true, }; @@ -5737,8 +5681,8 @@ static const std::unordered_map k_offload_map { "pos_embd", OFFLOAD_FUNC_NR }, { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. 
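Stepping back to the llm_build_context change above: buf_compute_meta can be a plain std::vector of bytes because the compute context now holds only tensor and graph metadata, while the tensor data itself is placed into backend buffers by the allocator. A minimal sketch of creating such a metadata-only context, sized with the same overhead helpers the patch uses:

    // A no_alloc context: ggml_new_tensor() creates tensor descriptors inside
    // this buffer but reserves no space for their data.
    std::vector<uint8_t> buf_compute_meta(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());
    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_compute_meta.size(),
        /*.mem_buffer =*/ buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx0 = ggml_init(params);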
rope) - { "Q_scale", OFFLOAD_FUNC_FRC }, - { "KQ_scale", OFFLOAD_FUNC_FRC }, + { "Q_scale", OFFLOAD_FUNC_NOP }, + { "KQ_scale", OFFLOAD_FUNC_NOP }, { "KQ_mask", OFFLOAD_FUNC_FRC }, { "K_shift", OFFLOAD_FUNC_FRC }, @@ -5845,7 +5789,7 @@ static struct ggml_cgraph * llama_build_graph( bool alloc_inp_KQ_mask = false; bool alloc_inp_K_shift = false; -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) const bool do_offload = true; #else const bool do_offload = true; // TODO: set to false after finishing refactoring @@ -5873,7 +5817,7 @@ static struct ggml_cgraph * llama_build_graph( if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) { const int64_t n_tokens = cur->ne[0]; - memcpy(cur->data, batch.token, n_tokens*ggml_element_size(cur)); + ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur)); } alloc_inp_tokens = true; @@ -5886,7 +5830,7 @@ static struct ggml_cgraph * llama_build_graph( const int64_t n_embd = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; - memcpy(cur->data, batch.embd, n_tokens*n_embd*ggml_element_size(cur)); + ggml_backend_tensor_set(cur, batch.embd, 0, n_tokens*n_embd*ggml_element_size(cur)); } alloc_inp_embd = true; @@ -5898,11 +5842,8 @@ static struct ggml_cgraph * llama_build_graph( if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) { const int64_t n_tokens = cur->ne[0]; - int32_t * data = (int32_t *) cur->data; - - for (int i = 0; i < n_tokens; ++i) { - data[i] = batch.pos[i]; - } + static_assert(std::is_same::value, "llama_pos must be int32_t"); + ggml_backend_tensor_set(cur, batch.pos, 0, n_tokens*ggml_element_size(cur)); } alloc_inp_pos = true; @@ -5913,7 +5854,8 @@ static struct ggml_cgraph * llama_build_graph( if (!ggml_allocr_is_measure(lctx.alloc)) { const int64_t n_embd_head = model.hparams.n_embd_head(); - ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head))); + float f = 1.0f/sqrtf(float(n_embd_head)); + ggml_backend_tensor_set(cur, &f, 0, sizeof(f)); } alloc_inp_Q_scale = true; @@ -5924,13 +5866,15 @@ static struct ggml_cgraph * llama_build_graph( if (!ggml_allocr_is_measure(lctx.alloc)) { const int64_t n_embd_head = model.hparams.n_embd_head(); + float f; if (model.arch == LLM_ARCH_PHI2) { // with phi2, we scale the Q to avoid precision issues // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 - ggml_set_f32(cur, 1.0f); + f = 1.0f; } else { - ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head))); + f = 1.0f/sqrtf(float(n_embd_head)); } + ggml_backend_tensor_set(cur, &f, 0, sizeof(f)); } alloc_inp_KQ_scale = true; @@ -5943,8 +5887,13 @@ static struct ggml_cgraph * llama_build_graph( const int64_t n_kv = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; - float * data = (float *) cur->data; - memset(data, 0, ggml_nbytes(cur)); + float * data; + if (ggml_backend_buffer_is_host(cur->buffer)) { + data = (float *) cur->data; + } else { + lctx.buf_copy.resize(ggml_nbytes(cur)); + data = (float *) lctx.buf_copy.data(); + } for (int h = 0; h < 1; ++h) { for (int j = 0; j < n_tokens; ++j) { @@ -5952,12 +5901,20 @@ static struct ggml_cgraph * llama_build_graph( const llama_seq_id seq_id = batch.seq_id[j][0]; for (int i = 0; i < n_kv; ++i) { + float f; if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; + f = -INFINITY; + } else { + f = 0; } + data[h*(n_kv*n_tokens) + j*n_kv + i] = f; } } } + + if (data != cur->data) { + ggml_backend_tensor_set(cur, data, 
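The staging idiom introduced here for KQ_mask (and reused for K_shift below) is the general way to fill an input tensor that may live in device memory: write in place when the buffer is host-visible, otherwise build the values in a temporary host buffer and upload them with a single ggml_backend_tensor_set call. Condensed, assuming a reusable scratch vector like the patch's lctx.buf_copy:

    // Fill a backend tensor that may reside on the GPU.
    std::vector<uint8_t> buf_copy;              // persistent scratch, like lctx.buf_copy
    float * data;
    if (ggml_backend_buffer_is_host(cur->buffer)) {
        data = (float *) cur->data;             // host buffer: write directly
    } else {
        buf_copy.resize(ggml_nbytes(cur));      // device buffer: stage on host
        data = (float *) buf_copy.data();
    }
    // ... compute the mask values into data[...] ...
    if ((void *) data != cur->data) {
        ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));  // one upload
    }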
0, ggml_nbytes(cur)); + } } alloc_inp_KQ_mask = true; @@ -5969,11 +5926,21 @@ static struct ggml_cgraph * llama_build_graph( if (!ggml_allocr_is_measure(lctx.alloc)) { const int64_t n_ctx = cur->ne[0]; - int32_t * data = (int32_t *) cur->data; + int32_t * data; + if (ggml_backend_buffer_is_host(cur->buffer)) { + data = (int32_t *) cur->data; + } else { + lctx.buf_copy.resize(ggml_nbytes(cur)); + data = (int32_t *) lctx.buf_copy.data(); + } for (int i = 0; i < n_ctx; ++i) { data[i] = lctx.kv_self.cells[i].delta; } + + if (data != cur->data) { + ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur)); + } } alloc_inp_K_shift = true; @@ -6010,7 +5977,7 @@ static struct ggml_cgraph * llama_build_graph( static const std::unordered_map> k_offload_func_name = { { OFFLOAD_FUNC_NOP, "CPU" }, { OFFLOAD_FUNC_OUT, "CPU" }, -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) { OFFLOAD_FUNC, "GPU (CUDA)" }, { OFFLOAD_FUNC_FRC, "GPU (CUDA) FRC" }, { OFFLOAD_FUNC_KQV, "GPU (CUDA) KQV" }, @@ -6083,7 +6050,7 @@ static struct ggml_cgraph * llama_build_graph( offload_func_t func = ggml_offload_nop; // this is needed for compatibility with Metal for example -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc; #else static offload_func_t ggml_offload_gpu = ggml_offload_nop; @@ -6305,11 +6272,12 @@ static int llama_decode_internal( GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); } -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) + char * buf_alloc_base = (char *)ggml_backend_buffer_get_base(lctx.buf_alloc); for (int i = 0; i < gf->n_leafs; i++) { ggml_tensor * node = gf->leafs[i]; if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) { - ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data); + ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base); ggml_cuda_copy_to_device(node); } } @@ -6317,7 +6285,7 @@ static int llama_decode_internal( for (int i = 0; i < gf->n_nodes; i++) { ggml_tensor * node = gf->nodes[i]; if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) { - ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data); + ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base); } } @@ -6344,23 +6312,23 @@ static int llama_decode_internal( n_threads = 1; } -#if GGML_USE_MPI +#ifdef GGML_USE_MPI const int64_t n_layer = hparams.n_layer; ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer); #endif #ifdef GGML_USE_METAL - if (lctx.ctx_metal) { - ggml_metal_set_n_cb (lctx.ctx_metal, n_threads); - ggml_metal_graph_compute(lctx.ctx_metal, gf); - } else { - ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads); + if (ggml_backend_is_metal(lctx.backend)) { + ggml_backend_metal_set_n_cb(lctx.backend, n_threads); } -#else - ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads); #endif -#if GGML_USE_MPI + if (ggml_backend_is_cpu(lctx.backend)) { + ggml_backend_cpu_set_n_threads(lctx.backend, n_threads); + } + ggml_backend_graph_compute(lctx.backend, gf); + +#ifdef GGML_USE_MPI ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer); #endif @@ -6412,20 +6380,20 @@ static int llama_decode_internal( if (batch.logits[i] == 0) { continue; } - memcpy(logits_out.data() + (n_vocab*i), (float *) ggml_get_data(res) + (n_vocab*i), sizeof(float)*n_vocab); + ggml_backend_tensor_get(res, 
logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float)); #ifndef NDEBUG logits_valid[i] = true; #endif } } else if (lctx.logits_all) { logits_out.resize(n_vocab * n_tokens); - memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*n_tokens); + ggml_backend_tensor_get(res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float)); #ifndef NDEBUG std::fill(logits_valid.begin(), logits_valid.end(), true); #endif } else { logits_out.resize(n_vocab); - memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(n_tokens - 1)), sizeof(float)*n_vocab); + ggml_backend_tensor_get(res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float)); #ifndef NDEBUG logits_valid[0] = true; #endif @@ -6437,7 +6405,7 @@ static int llama_decode_internal( auto & embedding_out = lctx.embedding; embedding_out.resize(n_embd); - memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(n_tokens - 1)), sizeof(float)*n_embd); + ggml_backend_tensor_get(embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float)); } // measure the performance only for the single-token evals @@ -8395,12 +8363,6 @@ void llama_beam_search(llama_context * ctx, // quantization // -template -struct no_init { - T value; - no_init() { /* do nothing */ } -}; - struct quantize_state_internal { const llama_model & model; const llama_model_quantize_params * params; @@ -8643,9 +8605,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s #endif llama_model_loader ml(fname_inp, use_mmap, NULL); - if (ml.use_mmap) { - ml.mapping.reset(new llama_mmap(&ml.file, /* prefetch */ 0, ggml_is_numa())); - } + ml.init_mapping(false); // no prefetching? llama_model model; llm_load_arch(ml, model); @@ -8944,29 +8904,10 @@ static int llama_apply_lora_from_file_internal( // load base model std::unique_ptr ml; - unique_context base_ctx(nullptr, ggml_free); - std::vector base_buf; - if (path_base_model) { + if (path_base_model) { LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model); - ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ NULL)); - - size_t ctx_size; - size_t mmapped_size; - ml->calc_sizes(ctx_size, mmapped_size); - - base_buf.resize(ctx_size); - - ggml_init_params base_params; - base_params.mem_size = base_buf.size(); - base_params.mem_buffer = base_buf.data(); - base_params.no_alloc = ml->use_mmap; - - base_ctx.reset(ggml_init(base_params)); - - // maybe this should be in llama_model_loader - if (ml->use_mmap) { - ml->mapping.reset(new llama_mmap(&ml->file, /* prefetch */ 0, ggml_is_numa())); - } + ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr)); + ml->init_mapping(false); // no prefetching } // read tensors and apply @@ -9058,7 +8999,7 @@ static int llama_apply_lora_from_file_internal( offload_func_t offload_func = ggml_offload_nop; offload_func_t offload_func_force_inplace = ggml_offload_nop; -#ifdef GGML_USE_CUBLAS +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) { if (dest_t->type != GGML_TYPE_F16) { throw std::runtime_error(format( @@ -9079,7 +9020,7 @@ static int llama_apply_lora_from_file_internal( return 1; } - base_t = ml->create_tensor(base_ctx.get(), base_name, { dest_t->ne[0], dest_t->ne[1] }, GGML_BACKEND_CPU); + base_t = ml->get_tensor_meta(base_name.c_str()); 
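A detail that is easy to miss in these read-back conversions: ggml_backend_tensor_get and ggml_backend_tensor_set take byte offsets and byte sizes, not element counts, so every count is scaled by the element size. For example, fetching only the last token's logits from the result tensor looks like this (a sketch using the patch's variable names):

    // res has layout [n_vocab, n_tokens]; select the row of the final token
    // by skipping (n_tokens - 1) rows' worth of bytes.
    std::vector<float> logits(n_vocab);
    ggml_backend_tensor_get(res, logits.data(),
                            (n_vocab*(n_tokens - 1))*sizeof(float),
                            n_vocab*sizeof(float));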
ml->load_data_for(base_t); } else { base_t = dest_t; @@ -9364,7 +9305,39 @@ struct llama_context * llama_new_context_with_model( // reserve memory for context buffers if (!hparams.vocab_only) { - if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v, cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) { + // initialize backend +#ifdef GGML_USE_METAL + if (model->n_gpu_layers > 0) { + ctx->backend = ggml_backend_metal_init(); + if (ctx->backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__); + } + } +#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) + // for testing only + if (model->n_gpu_layers > 0) { + ctx->backend = ggml_backend_cuda_init(0); + if (ctx->backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend\n", __func__); + } + } +#endif + + if (ctx->backend == nullptr && ggml_backend_buffer_is_host(model->buf)) { + ctx->backend = ggml_backend_cpu_init(); + if (ctx->backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__); + } + } + + if (ctx->backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize a backend\n", __func__); + delete ctx; + return nullptr; + } + + if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v, + cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) { LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx); return nullptr; @@ -9400,12 +9373,11 @@ struct llama_context * llama_new_context_with_model( } { - static const size_t tensor_alignment = 32; // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data - ctx->buf_compute.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); + ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); // create measure allocator - ctx->alloc = ggml_allocr_new_measure(tensor_alignment); + ctx->alloc = ggml_allocr_new_measure_from_backend(ctx->backend); // build worst-case graph int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch); @@ -9413,98 +9385,50 @@ struct llama_context * llama_new_context_with_model( llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0)); -#ifdef GGML_USE_METAL - if (model->n_gpu_layers > 0) { - ctx->ctx_metal = ggml_metal_init(1); - if (!ctx->ctx_metal) { - LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); - llama_free(ctx); - return NULL; - } - //ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false); - //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); - } -#endif // measure memory requirements for the graph - size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; + size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf); - LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); + LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute_meta.size() + alloc_size) / 1024.0 / 1024.0); - // recreate allocator with exact memory requirements + // create allocator again with exact memory requirements ggml_allocr_free(ctx->alloc); - 
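The backend selection added above is a straightforward fallback chain: try a GPU backend when layers are offloaded (Metal, or CUDA in the test configuration), fall back to CPU, and abort context creation only if nothing initializes. The same shape in isolation (a sketch, with error logging trimmed; the patch additionally checks that the model weights live in a host buffer before choosing the CPU backend):

    ggml_backend_t backend = nullptr;
    #ifdef GGML_USE_METAL
    if (n_gpu_layers > 0) {
        backend = ggml_backend_metal_init();  // may return nullptr on failure
    }
    #endif
    if (backend == nullptr) {
        backend = ggml_backend_cpu_init();    // host fallback
    }
    if (backend == nullptr) {
        // no usable backend: clean up and return nullptr to the caller
    }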
ctx->buf_alloc.resize(alloc_size); - ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment); -#ifdef GGML_USE_METAL - if (ctx->ctx_metal) { - //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); - } -#endif -#ifdef GGML_USE_CUBLAS - ggml_cuda_set_scratch_size(alloc_size); - LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0); + ctx->buf_alloc = ggml_backend_alloc_buffer(ctx->backend, alloc_size); + ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc); +#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) + if (model->n_gpu_layers > 0) { + ggml_cuda_set_scratch_size(alloc_size); + LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0); - // calculate total VRAM usage - auto add_tensor = [](const ggml_tensor * t, size_t & size) { - if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) { - size += ggml_nbytes(t); + // calculate total VRAM usage + auto add_tensor = [](const ggml_tensor * t, size_t & size) { + if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) { + size += ggml_nbytes(t); + } + }; + size_t model_vram_size = 0; + for (const auto & kv : model->tensors_by_name) { + add_tensor(kv.second, model_vram_size); } - }; - size_t model_vram_size = 0; - for (const auto & kv : model->tensors_by_name) { - add_tensor(kv.second, model_vram_size); - } - size_t kv_vram_size = 0; - for (auto & k : ctx->kv_self.k_l) { - add_tensor(k, kv_vram_size); - } - for (auto & v : ctx->kv_self.v_l) { - add_tensor(v, kv_vram_size); - } + size_t kv_vram_size = 0; + for (auto & k : ctx->kv_self.k_l) { + add_tensor(k, kv_vram_size); + } + for (auto & v : ctx->kv_self.v_l) { + add_tensor(v, kv_vram_size); + } - size_t ctx_vram_size = alloc_size + kv_vram_size; - size_t total_vram_size = model_vram_size + ctx_vram_size; + size_t ctx_vram_size = alloc_size + kv_vram_size; + size_t total_vram_size = model_vram_size + ctx_vram_size; - LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__, - total_vram_size / 1024.0 / 1024.0, - model_vram_size / 1024.0 / 1024.0, - ctx_vram_size / 1024.0 / 1024.0); + LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__, + total_vram_size / 1024.0 / 1024.0, + model_vram_size / 1024.0 / 1024.0, + ctx_vram_size / 1024.0 / 1024.0); + } #endif } - -#ifdef GGML_USE_METAL - if (model->n_gpu_layers > 0) { - // this allocates all Metal resources and memory buffers - - void * data_ptr = NULL; - size_t data_size = 0; - - if (ctx->model.mapping) { - data_ptr = ctx->model.mapping->addr; - data_size = ctx->model.mapping->size; - } else { - data_ptr = ggml_get_mem_buffer(ctx->model.ctx); - data_size = ggml_get_mem_size (ctx->model.ctx); - } - - const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); - - LLAMA_LOG_INFO("%s: max tensor size = %8.2f MiB\n", __func__, max_size/1024.0/1024.0); - -#define LLAMA_METAL_CHECK_BUF(result) \ - if (!(result)) { \ - LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ - llama_free(ctx); \ - return NULL; \ - } - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", 
ctx->buf_alloc.data, ctx->buf_alloc.size, 0)); -#undef LLAMA_METAL_CHECK_BUF - } -#endif } #ifdef GGML_USE_MPI @@ -9796,7 +9720,7 @@ size_t llama_get_state_size(const struct llama_context * ctx) { const size_t s_embedding = ctx->embedding.size() * sizeof(float); const size_t s_kv_size = sizeof(size_t); const size_t s_kv_ntok = sizeof(int); - const size_t s_kv = ctx->kv_self.buf.size; + const size_t s_kv = ggml_backend_buffer_get_size(ctx->kv_self.buf); const size_t s_total = ( + s_rng_size @@ -9924,7 +9848,7 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat const auto n_embd = hparams.n_embd_gqa(); const auto n_ctx = cparams.n_ctx; - const size_t kv_buf_size = kv_self.buf.size; + const size_t kv_buf_size = ggml_backend_buffer_get_size(kv_self.buf); const uint32_t kv_head = kv_self.head; const uint32_t kv_size = kv_self.size; const uint32_t kv_used = kv_self.used; @@ -9940,17 +9864,12 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); ggml_cgraph * gf = ggml_new_graph(cpy_ctx); - std::vector> kout2d_data(n_layer); - std::vector> vout2d_data(n_layer); + std::vector kout2d(n_layer); + std::vector vout2d(n_layer); for (int il = 0; il < (int) n_layer; ++il) { - ggml_tensor * kout2d = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head); - kout2d_data[il].resize(ggml_nbytes(kout2d)); - kout2d->data = kout2d_data[il].data(); - - ggml_tensor * vout2d = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd); - vout2d_data[il].resize(ggml_nbytes(vout2d)); - vout2d->data = vout2d_data[il].data(); + kout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head); + vout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd); ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il], n_embd, kv_head, @@ -9960,20 +9879,28 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat kv_head, n_embd, elt_size*n_ctx, 0); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d)); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d)); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d[il])); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d[il])); } - ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1); + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend); + + ggml_backend_graph_compute(ctx->backend, gf); + + std::vector tmp_buf; + for (int il = 0; il < (int) n_layer; ++il) { + tmp_buf.resize(ggml_nbytes(kout2d[il])); + ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size()); + data_ctx->write(tmp_buf.data(), tmp_buf.size()); + + tmp_buf.resize(ggml_nbytes(vout2d[il])); + ggml_backend_tensor_get(vout2d[il], tmp_buf.data(), 0, tmp_buf.size()); + data_ctx->write(tmp_buf.data(), tmp_buf.size()); + } ggml_free(cpy_ctx); - // our data is now in the kout2d_data and vout2d_data buffers - // write them to file - for (uint32_t il = 0; il < n_layer; ++il) { - data_ctx->write(kout2d_data[il].data(), kout2d_data[il].size()); - data_ctx->write(vout2d_data[il].data(), vout2d_data[il].size()); - } + ggml_backend_buffer_free(buf); } for (uint32_t i = 0; i < kv_size; ++i) { @@ -10071,21 +9998,19 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used); if (kv_buf_size) { - 
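Serializing the KV cache now goes through the backend rather than reading tensor memory directly: the copy graph's staging tensors (kout2d/vout2d) are allocated on the context's backend, the ggml_cpy nodes are computed there, and the results are downloaded into a host buffer for writing. The skeleton of that pipeline, condensed from the hunk above for a single layer il:

    // 1) allocate the staging tensors created in cpy_ctx on the backend
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);
    // 2) run the copy graph on the backend (works for CPU, Metal, CUDA alike)
    ggml_backend_graph_compute(ctx->backend, gf);
    // 3) download each staging tensor to host memory and serialize it
    std::vector<uint8_t> tmp_buf(ggml_nbytes(kout2d[il]));
    ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size());
    data_ctx->write(tmp_buf.data(), tmp_buf.size());
    // 4) the staging buffer is no longer needed once the data is written out
    ggml_backend_buffer_free(buf);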
GGML_ASSERT(kv_self.buf.size == kv_buf_size); + GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.buf) == kv_buf_size); const size_t elt_size = ggml_element_size(kv_self.k_l[0]); ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); ggml_cgraph * gf = ggml_new_graph(cpy_ctx); - for (int il = 0; il < n_layer; ++il) { - ggml_tensor * kin2d = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head); - kin2d->data = (void *) inp; - inp += ggml_nbytes(kin2d); + std::vector kin2d(n_layer); + std::vector vin2d(n_layer); - ggml_tensor * vin2d = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd); - vin2d->data = (void *) inp; - inp += ggml_nbytes(vin2d); + for (int il = 0; il < n_layer; ++il) { + kin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head); + vin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd); ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il], n_embd, kv_head, @@ -10095,13 +10020,26 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { kv_head, n_embd, elt_size*n_ctx, 0); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d, k2d)); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d, v2d)); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d[il], k2d)); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d[il], v2d)); } - ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1); + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend); + + // load data into the tensors + for (int il = 0; il < n_layer; ++il) { + ggml_backend_tensor_set(kin2d[il], inp, 0, ggml_nbytes(kin2d[il])); + inp += ggml_nbytes(kin2d[il]); + + ggml_backend_tensor_set(vin2d[il], inp, 0, ggml_nbytes(vin2d[il])); + inp += ggml_nbytes(vin2d[il]); + } + + ggml_backend_graph_compute(ctx->backend, gf); ggml_free(cpy_ctx); + + ggml_backend_buffer_free(buf); } ctx->kv_self.head = kv_head; From 4a5f9d629ecfd0a53afdddbaf54a4fa02d9a9ce9 Mon Sep 17 00:00:00 2001 From: Samuel Maynard Date: Thu, 21 Dec 2023 22:36:26 +0200 Subject: [PATCH 32/84] ci : add `jlumbroso/free-disk-space` to docker workflow (#4150) * [github][workflows][docker]: removes hardcoded `ggerganov` from `ghcr` repo * [github][workflows][docker]: adds `jlumbroso/free-disk-space` --- .github/workflows/docker.yml | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 9c90c77ac..a7165a38f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -52,6 +52,23 @@ jobs: username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} + # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: true + - name: Build and push Docker image (versioned) if: github.event_name == 'push' uses: docker/build-push-action@v4 @@ -59,7 +76,7 @@ jobs: context: . 
push: true platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}" + tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}" file: ${{ matrix.config.dockerfile }} - name: Build and push Docker image (tagged) @@ -68,5 +85,5 @@ jobs: context: . push: ${{ github.event_name == 'push' }} platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}" + tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}" file: ${{ matrix.config.dockerfile }} From 32259b2dade6f6856739bf7ba0a4ff7b474dc760 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 21 Dec 2023 23:07:58 +0200 Subject: [PATCH 33/84] gguf : simplify example dependencies --- Makefile | 2 +- examples/gguf/CMakeLists.txt | 2 +- examples/gguf/gguf.cpp | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 512407a1d..68df7702a 100644 --- a/Makefile +++ b/Makefile @@ -606,7 +606,7 @@ save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(C server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS) $(LWINSOCK2) -Wno-cast-qual -gguf: examples/gguf/gguf.cpp ggml.o llama.o $(OBJS) +gguf: examples/gguf/gguf.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) diff --git a/examples/gguf/CMakeLists.txt b/examples/gguf/CMakeLists.txt index 7d1806af3..6481f087b 100644 --- a/examples/gguf/CMakeLists.txt +++ b/examples/gguf/CMakeLists.txt @@ -1,5 +1,5 @@ set(TARGET gguf) add_executable(${TARGET} gguf.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index 9e24bf24c..e67be4fb2 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -1,5 +1,4 @@ #include "ggml.h" -#include "llama.h" #include #include From 769a7bc85eaa44e3d7eadf39abfeff7bb0b9cc2f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 21 Dec 2023 23:20:36 +0200 Subject: [PATCH 34/84] gguf-py : fix broken link --- gguf-py/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf-py/README.md b/gguf-py/README.md index a27d2fc0e..22d7ffa52 100644 --- a/gguf-py/README.md +++ b/gguf-py/README.md @@ -3,7 +3,7 @@ This is a Python package for writing binary files in the [GGUF](https://github.com/ggerganov/ggml/pull/302) (GGML Universal File) format. -See [convert-llama-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert-llama-hf-to-gguf.py) +See [convert-llama-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py) as an example for its usage. 
## Installation From afefa319f1f59b002dfa0d1ef407a2c74bd9770b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 21 Dec 2023 23:20:49 +0200 Subject: [PATCH 35/84] ggml : change ggml_scale to take a float instead of tensor (#4573) * ggml : change ggml_scale to take a float instead of tensor * ggml : fix CPU implementation * tests : fix test-grad0 ggml-ci --- examples/baby-llama/baby-llama.cpp | 15 +-- examples/export-lora/export-lora.cpp | 2 +- examples/finetune/finetune.cpp | 42 +++---- examples/llava/clip.cpp | 8 +- .../train-text-from-scratch.cpp | 14 +-- ggml-cuda.cu | 14 +-- ggml-metal.m | 6 +- ggml.c | 42 +++---- ggml.h | 4 +- llama.cpp | 119 +++--------------- tests/test-backend-ops.cpp | 9 +- tests/test-grad0.cpp | 12 +- 12 files changed, 82 insertions(+), 205 deletions(-) diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp index 2dc2988d3..e7d2ad592 100644 --- a/examples/baby-llama/baby-llama.cpp +++ b/examples/baby-llama/baby-llama.cpp @@ -575,10 +575,7 @@ static struct ggml_tensor * forward( // KQ_scaled = KQ / sqrt(n_embd/n_head) // KQ_scaled shape [n_past + N, N, n_head, 1] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head))); + struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // KQ_masked shape [n_past + N, N, n_head, 1] @@ -844,10 +841,7 @@ static struct ggml_tensor * forward_batch( // KQ_scaled = KQ / sqrt(n_embd/n_head) // KQ_scaled shape [n_past + N, N, n_head, n_batch] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head))); + struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); assert_shape_4d(KQ_scaled, n_past + N, N, n_head, n_batch); // KQ_masked = mask_past(KQ_scaled) @@ -1131,10 +1125,7 @@ static struct ggml_tensor * forward_lora( // KQ_scaled = KQ / sqrt(n_embd/n_head) // KQ_scaled shape [n_past + N, N, n_head, 1] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head))); + struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // KQ_masked shape [n_past + N, N, n_head, 1] diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index c8754ce70..58fbe204d 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -309,7 +309,7 @@ static struct ggml_cgraph * build_graph_lora( ) { struct ggml_tensor * ab = ggml_mul_mat(ctx, lora_a, lora_b); if (scaling != 1.0f) { - ab = ggml_scale(ctx, ab, ggml_new_f32(ctx, scaling)); + ab = ggml_scale(ctx, ab, scaling); } struct ggml_tensor * res = ggml_add_inplace(ctx, tensor, ab); diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 6a668d764..7b1333a9d 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -269,7 +269,7 @@ static void load_model_hparams_gguf(struct gguf_context * ctx, struct my_llama_h float rope_freq_scale = 1.0f; GGUF_GET_KEY(ctx, hparams->f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS)); GGUF_GET_KEY(ctx, hparams->rope_freq_base, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE)); - GGUF_GET_KEY(ctx, rope_freq_scale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR)); + GGUF_GET_KEY(ctx, 
rope_freq_scale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR)); if (rope_freq_scale != 1.0f) { hparams->rope_freq_scale = 1.0f / rope_freq_scale; } @@ -612,6 +612,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( const int n_rot = hparams.n_embd_head(); const int n_embd_head = hparams.n_embd_head(); const int n_embd_gqa = hparams.n_embd_gqa(); + const float rms_norm_eps = hparams.f_norm_rms_eps; const float rope_freq_base = hparams.rope_freq_base; const float rope_freq_scale = hparams.rope_freq_scale; @@ -680,10 +681,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( checkpoints.push_back(t01); } - struct ggml_tensor * kv_scale = NULL; - if (!enable_flash_attn) { - kv_scale = ggml_new_f32(ctx, 1.0f/sqrtf(float(n_embd)/n_head)); - } + const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head); for (int il = 0; il < n_layer; ++il) { struct my_llama_layer & layer = model->layers[il]; @@ -781,32 +779,32 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( // make sure some tensors are not reallocated by inserting new temporary nodes depending on them int n_leafs_before = gb->n_leafs; int n_nodes_before = gb->n_nodes; - struct ggml_tensor * one = ggml_new_f32(ctx, 1.0f); + // output tensors - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f)); // input gradient - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f)); GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL); ggml_allocr_alloc(alloc, t36->grad); // KQ_pos - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f)); // make sure base model tensors data cannot be used in viewable operations - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->norm, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->output, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->norm, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->output, 1.0f)); for (int il = 0; il < n_layer; ++il) { struct my_llama_layer & layer = model->layers[il]; - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.attention_norm, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_norm, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wq, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.attention_norm, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_norm, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wq, 1.0f)); + 
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, 1.0f)); } // allocating checkpoints in one block to reduce memory fragmentation diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 112465968..f06ec400d 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -330,12 +330,6 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima ggml_repeat(ctx0, model.pre_ln_b, embeddings)); } - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_allocr_alloc(ctx->alloc, KQ_scale); - if (!ggml_allocr_is_measure(ctx->alloc)) { - ggml_set_f32(KQ_scale, 1.0f / sqrt((float)d_head)); - } - // loop over layers for (int il = 0; il < n_layer - 1; il++) { struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states @@ -356,7 +350,7 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima struct ggml_tensor * Q = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, cur), ggml_mul_mat(ctx0, model.layers[il].q_w, cur)); - Q = ggml_scale_inplace(ctx0, Q, KQ_scale); + Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size); Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size); diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp index f7ed63365..4a9a2340b 100644 --- a/examples/train-text-from-scratch/train-text-from-scratch.cpp +++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp @@ -369,10 +369,7 @@ static struct ggml_tensor * llama_build_train_graphs( checkpoints.push_back(t00); checkpoints.push_back(t01); - struct ggml_tensor * kv_scale = NULL; - if (!enable_flash_attn) { - kv_scale = ggml_new_f32(ctx, 1.0f/sqrtf(float(n_embd)/n_head)); - } + const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head); for (int il = 0; il < n_layer; ++il) { struct my_llama_layer & layer = model->layers[il]; @@ -444,14 +441,13 @@ static struct ggml_tensor * llama_build_train_graphs( // make sure some tensors are not reallocated by inserting new temporary nodes depending on them int n_leafs_before = gb->n_leafs; int n_nodes_before = gb->n_nodes; - struct ggml_tensor * one = ggml_new_f32(ctx, 1.0f); // output tensors - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f)); // input gradient - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f)); // KQ_pos - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f)); GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL); ggml_allocr_alloc(alloc, t36->grad); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f5e060d32..ac91ee12e 100644 
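All of these call sites shrink for the same reason: ggml_scale now takes the factor as a plain float, stored in the op's parameters, instead of a one-element tensor that had to be allocated (and, on GPU backends, copied to the device). In miniature, using the attention scaling from the examples above:

    // old API: the scale factor lived in a separate scalar tensor
    //   KQ = ggml_scale(ctx0, KQ, ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd_head))));
    // new API: pass the float directly; it ends up in dst->op_params
    KQ = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd_head)));

This also removes the gradient with respect to the scale operand in the backward pass, since a constant op parameter has no gradient of its own.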
--- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7700,17 +7700,9 @@ inline void ggml_cuda_op_scale( const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); - float scale; - // HACK: support for ggml backend interface - if (src1->backend == GGML_BACKEND_CPU) { - scale = ((float *) src1->data)[0]; - } else { - // TODO: pass pointer to kernel instead of copying to host - CUDA_CHECK(cudaMemcpy(&scale, src1->data, sizeof(float), cudaMemcpyDeviceToHost)); - } + const float scale = ((float *) dst->op_params)[0]; scale_f32_cuda(src0_dd, dst_dd, scale, ggml_nelements(src0), main_stream); CUDA_CHECK(cudaGetLastError()); @@ -7757,8 +7749,6 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU; const bool dst_on_device = dst->backend == GGML_BACKEND_GPU; - const bool src1_stays_on_host = use_src1 && dst->op == GGML_OP_SCALE; - // dd = data device float * src0_ddf = nullptr; float * src1_ddf = nullptr; @@ -7779,7 +7769,7 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf, src0, 0, 0, 0, nrows0, main_stream)); } - if (use_src1 && !src1_stays_on_host) { + if (use_src1) { if (src1_on_device) { src1_ddf = (float *) src1_extra->data_device[g_main_device]; } else { diff --git a/ggml-metal.m b/ggml-metal.m index e60b93b36..51a72ae33 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1293,7 +1293,7 @@ void ggml_metal_graph_compute( { GGML_ASSERT(ggml_is_contiguous(src0)); - const float scale = *(const float *) src1->data; + const float scale = *(const float *) dst->op_params; int64_t n = ggml_nelements(dst); @@ -1304,8 +1304,8 @@ void ggml_metal_graph_compute( [encoder setComputePipelineState:ctx->pipeline_scale]; } - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; [encoder setBytes:&scale length:sizeof(scale) atIndex:2]; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; diff --git a/ggml.c b/ggml.c index 236148514..f27920a2d 100644 --- a/ggml.c +++ b/ggml.c @@ -4171,23 +4171,23 @@ struct ggml_tensor * ggml_out_prod( static struct ggml_tensor * ggml_scale_impl( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b, + float s, bool inplace) { - GGML_ASSERT(ggml_is_scalar(b)); GGML_ASSERT(ggml_is_padded_1d(a)); bool is_node = false; - if (a->grad || b->grad) { + if (a->grad) { is_node = true; } struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + ggml_set_op_params(result, &s, sizeof(s)); + result->op = GGML_OP_SCALE; result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; - result->src[1] = b; return result; } @@ -4195,15 +4195,15 @@ static struct ggml_tensor * ggml_scale_impl( struct ggml_tensor * ggml_scale( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, false); + float s) { + return ggml_scale_impl(ctx, a, s, false); } struct ggml_tensor * ggml_scale_inplace( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, true); + float s) { + return ggml_scale_impl(ctx, a, s, true); } // ggml_set @@ -10325,19 +10325,17 @@ static void ggml_compute_forward_out_prod( static void ggml_compute_forward_scale_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, - const struct ggml_tensor * src1, struct ggml_tensor * dst) { GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { return; } // scale factor - const float v = *(float *) src1->data; + const float v = *(float *) dst->op_params; const int ith = params->ith; const int nth = params->nth; @@ -10368,12 +10366,11 @@ static void ggml_compute_forward_scale_f32( static void ggml_compute_forward_scale( const struct ggml_compute_params * params, const struct ggml_tensor * src0, - const struct ggml_tensor * src1, struct ggml_tensor * dst) { switch (src0->type) { case GGML_TYPE_F32: { - ggml_compute_forward_scale_f32(params, src0, src1, dst); + ggml_compute_forward_scale_f32(params, src0, dst); } break; default: { @@ -14383,7 +14380,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm } break; case GGML_OP_SCALE: { - ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor); + ggml_compute_forward_scale(params, tensor->src[0], tensor); } break; case GGML_OP_SET: { @@ -14839,7 +14836,7 @@ static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct gg static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) { if (ggml_hash_contains(zero_table, a)) { - struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0)); + struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f); return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false); } else { return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false); @@ -14975,7 +14972,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor src0->grad, ggml_scale(ctx, ggml_mul(ctx, src0, tensor->grad), - ggml_new_f32(ctx, 2.0f)), + 2.0f), zero_table); } } break; @@ -14989,7 +14986,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor ggml_div(ctx, tensor->grad, tensor), - ggml_new_f32(ctx, 0.5f)), + 0.5f), zero_table); } } break; @@ -15155,17 +15152,12 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { // necessary for llama if (src0->grad) { + const float s = ((float *) tensor->op_params)[0]; + src0->grad = ggml_add_or_set(ctx, src0->grad, - ggml_scale_impl(ctx, tensor->grad, src1, false), - zero_table); - } - if (src1->grad) { - src1->grad = - ggml_add_or_set(ctx, - src1->grad, - ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)), + ggml_scale_impl(ctx, tensor->grad, s, 
false), zero_table); } } break; diff --git a/ggml.h b/ggml.h index b17314897..75918502b 100644 --- a/ggml.h +++ b/ggml.h @@ -1094,13 +1094,13 @@ extern "C" { GGML_API struct ggml_tensor * ggml_scale( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b); + float s); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_scale_inplace( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b); + float s); // b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set( diff --git a/llama.cpp b/llama.cpp index ba970ce8d..d6c192441 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4032,13 +4032,12 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * wo, struct ggml_tensor * wo_b, struct ggml_tensor * q_cur, - struct ggml_tensor * kq_scale, struct ggml_tensor * kq_mask, int64_t n_ctx, int32_t n_tokens, int32_t n_kv, float max_alibi_bias, - float scale, + float kq_scale, const llm_build_cb & cb, int il) { const int64_t n_embd = hparams.n_embd; @@ -4086,7 +4085,7 @@ static struct ggml_tensor * llm_build_kqv( kq = ggml_soft_max(ctx, kq); cb(kq, "kq_soft_max", il); } else { - kq = ggml_soft_max_ext(ctx, kq, kq_mask, scale); + kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale); cb(kq, "kq_soft_max_ext", il); } @@ -4231,10 +4230,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -4295,7 +4290,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4416,10 +4411,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -4478,7 +4469,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4536,10 +4527,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -4602,7 +4589,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, 
NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4659,10 +4646,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -4702,7 +4685,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4759,10 +4742,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -4911,7 +4890,7 @@ struct llm_build_context { // TODO: not tested, could be broken cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Q, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -4965,10 +4944,6 @@ struct llm_build_context { inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); cb(inpL, "inp_embd", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5002,7 +4977,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5056,10 +5031,6 @@ struct llm_build_context { inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); cb(inpL, "inp_embd", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5099,7 +5070,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5150,10 +5121,6 @@ struct llm_build_context { inpL 
= llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); cb(inpL, "inp_embd", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5193,7 +5160,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5253,10 +5220,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5306,7 +5269,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5366,10 +5329,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5423,7 +5382,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); cb(cur, "kqv_out", il); } @@ -5482,14 +5441,6 @@ struct llm_build_context { struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cb(inp_pos, "inp_pos", -1); - // Q_scale - struct ggml_tensor * Q_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(Q_scale, "Q_scale", -1); - - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); cb(KQ_mask, "KQ_mask", -1); @@ -5531,7 +5482,9 @@ struct llm_build_context { ); cb(Qcur, "Qcur", il); - Qcur = ggml_scale(ctx0, Qcur, Q_scale); + // with phi2, we scale the Q to avoid precision issues + // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 + Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head))); cb(Qcur, "Qcur", il); Kcur = ggml_rope_custom( @@ -5544,7 +5497,7 @@ struct llm_build_context { cur = llm_build_kqv(ctx0, model, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, - Qcur, 
KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il); + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il); cb(cur, "kqv_out", il); } @@ -5681,8 +5634,6 @@ static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map { "pos_embd", OFFLOAD_FUNC_NR }, { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. rope) - { "Q_scale", OFFLOAD_FUNC_NOP }, - { "KQ_scale", OFFLOAD_FUNC_NOP }, { "KQ_mask", OFFLOAD_FUNC_FRC }, { "K_shift", OFFLOAD_FUNC_FRC }, @@ -5784,8 +5735,6 @@ static struct ggml_cgraph * llama_build_graph( bool alloc_inp_tokens = false; bool alloc_inp_embd = false; bool alloc_inp_pos = false; - bool alloc_inp_Q_scale = false; - bool alloc_inp_KQ_scale = false; bool alloc_inp_KQ_mask = false; bool alloc_inp_K_shift = false; @@ -5849,37 +5798,6 @@ static struct ggml_cgraph * llama_build_graph( alloc_inp_pos = true; } - if (!alloc_inp_Q_scale && strcmp(name, "Q_scale") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); - - if (!ggml_allocr_is_measure(lctx.alloc)) { - const int64_t n_embd_head = model.hparams.n_embd_head(); - float f = 1.0f/sqrtf(float(n_embd_head)); - ggml_backend_tensor_set(cur, &f, 0, sizeof(f)); - } - - alloc_inp_Q_scale = true; - } - - if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); - - if (!ggml_allocr_is_measure(lctx.alloc)) { - const int64_t n_embd_head = model.hparams.n_embd_head(); - float f; - if (model.arch == LLM_ARCH_PHI2) { - // with phi2, we scale the Q to avoid precision issues - // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 - f = 1.0f; - } else { - f = 1.0f/sqrtf(float(n_embd_head)); - } - ggml_backend_tensor_set(cur, &f, 0, sizeof(f)); - } - - alloc_inp_KQ_scale = true; - } - if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) { ggml_allocr_alloc(lctx.alloc, cur); @@ -9054,10 +8972,7 @@ static int llama_apply_lora_from_file_internal( ggml_set_name(BA, "BA"); if (scaling != 1.0f) { - ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx.get(), scaling); - ggml_set_name(scale_tensor, "scale_tensor"); - - BA = ggml_scale_inplace(lora_ctx.get(), BA, scale_tensor); + BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling); offload_func(BA); ggml_set_name(BA, "BA_scaled"); } diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index f04b9438a..f3df8a8c6 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -766,18 +766,19 @@ struct test_bin_bcast : public test_case { struct test_scale : public test_case { const ggml_type type; const std::array<int64_t, 4> ne; + float scale; std::string vars() override { - return VARS_TO_STR2(type, ne); + return VARS_TO_STR3(type, ne, scale); } test_scale(ggml_type type = GGML_TYPE_F32, - std::array<int64_t, 4> ne = {10, 10, 10, 10}) - : type(type), ne(ne) {} + std::array<int64_t, 4> ne = {10, 10, 10, 10}, + float scale = 2.0f) + : type(type), ne(ne), scale(scale) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); - ggml_tensor * scale = ggml_new_tensor_1d(ctx, type, 1); ggml_tensor * out = ggml_scale(ctx, a, scale); return out; } diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp index 81c20a89c..14914def5 100644 --- a/tests/test-grad0.cpp +++ b/tests/test-grad0.cpp @@ -881,19 +881,19 @@ int main(int argc, const char ** argv) { // scale { srand(seed); - const int nargs = 2; + const int nargs = 1; int64_t ne2[4]; ne2[0] = 1; for (int ndims = 1; ndims <= 2; ++ndims) { - x[1] = get_random_tensor_f32(ctx0, 1, ne2, -1.0f,
1.0f); x[0] = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f); - ggml_set_param(ctx0, x[0]); - ggml_set_param(ctx0, x[1]); + const float s = -1.0f + 2.0f*frand(); - struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], x[1])); + ggml_set_param(ctx0, x[0]); + + struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], s)); check_gradient("scale", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY); } @@ -1395,7 +1395,7 @@ int main(int argc, const char ** argv) { ggml_add1(ctx0, ggml_scale(ctx0, ggml_soft_max(ctx0, x[0]), - ggml_new_f32(ctx0, 1.0f - eps)), + 1.0f - eps), ggml_new_f32(ctx0, eps)))); check_gradient("softmax", ctx0, x, f, ndims, nargs, 1e-3f, 2e-1f, INFINITY); From c7e9701f86564088350209d2f9d71c96ea00527f Mon Sep 17 00:00:00 2001 From: crasm Date: Fri, 22 Dec 2023 01:19:36 -0500 Subject: [PATCH 36/84] llama : add ability to cancel model loading (#4462) * llama : Add ability to cancel model load Updated llama_progress_callback so that if it returns false, the model loading is aborted. * llama : Add test for model load cancellation * Fix bool return in llama_model_load, remove std::ignore use * Update llama.cpp Co-authored-by: Jared Van Bortel * Fail test if model file is missing * Revert "Fail test if model file is missing" This reverts commit 32ebd525bf7e5a87ee8a3dbaab3d92ce79fbf23d. * Add test-model-load-cancel to Makefile * Revert "Revert "Fail test if model file is missing"" This reverts commit 2796953257ee5383fa7c8fe8fa8fc888c048fb0b. * Simplify .gitignore for tests, clang-tidy fixes * Label all ctest tests * ci : ctest uses -L main * Attempt at writing ctest_with_model * ci : get ci/run.sh working with test-model-load-cancel * ci : restrict .github/workflows/build.yml ctest to -L main * update requirements.txt * Disable test-model-load-cancel in make * Remove venv before creation * Restructure requirements.txt Top-level now imports the specific additional requirements for each python file. Using `pip install -r requirements.txt` will fail if versions become mismatched in the per-file requirements. * Make per-python-script requirements work alone This doesn't break the main requirements.txt. 
* Add comment * Add convert-persimmon-to-gguf.py to new requirements.txt scheme * Add check-requirements.sh script and GitHub workflow * Remove shellcheck installation step from workflow * Add nocleanup special arg * Fix merge see: https://github.com/ggerganov/llama.cpp/pull/4462#discussion_r1434593573 * reset to upstream/master * Redo changes for cancelling model load --------- Co-authored-by: Georgi Gerganov Co-authored-by: Jared Van Bortel --- llama.cpp | 46 +++++++++++++++++++++++++++++++++------------- llama.h | 6 ++++-- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/llama.cpp b/llama.cpp index d6c192441..cb0546c95 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2372,7 +2372,8 @@ struct llama_model_loader { } } - void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const { + // Returns false if cancelled by progress_callback + bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const { size_t size_data = 0; for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { @@ -2404,7 +2405,9 @@ struct llama_model_loader { GGML_ASSERT(cur); // unused tensors should have been caught by load_data already if (progress_callback) { - progress_callback((float) size_done / size_data, progress_callback_user_data); + if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) { + return false; + } } const size_t offs = file_offset(ggml_get_name(cur)); @@ -2466,8 +2469,11 @@ struct llama_model_loader { } if (progress_callback) { - progress_callback(1.0f, progress_callback_user_data); + // Even though the model is done loading, we still honor + // cancellation since we need to free allocations. + return progress_callback(1.0f, progress_callback_user_data); } + return true; } }; @@ -3044,7 +3050,8 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); } } -static void llm_load_tensors( +// Returns false if cancelled by progress_callback +static bool llm_load_tensors( llama_model_loader & ml, llama_model & model, int n_gpu_layers, @@ -3722,16 +3729,20 @@ static void llm_load_tensors( model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); } - ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? &model.mlock_mmap : NULL); + if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? 
&model.mlock_mmap : NULL)) { + return false; + } model.mapping = std::move(ml.mapping); // loading time will be recalculate after the first eval, so // we take page faults deferred by mmap() into consideration model.t_load_us = ggml_time_us() - model.t_start_us; + return true; } -static bool llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) { +// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback +static int llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) { try { llama_model_loader ml(fname, params.use_mmap, params.kv_overrides); @@ -3749,19 +3760,21 @@ static bool llama_model_load(const std::string & fname, llama_model & model, con if (params.vocab_only) { LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__); - return true; + return 0; } - llm_load_tensors( + if (!llm_load_tensors( ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock, params.progress_callback, params.progress_callback_user_data - ); + )) { + return -2; + } } catch (const std::exception & err) { LLAMA_LOG_ERROR("error loading model: %s\n", err.what()); - return false; + return -1; } - return true; + return 0; } // @@ -9141,11 +9154,18 @@ struct llama_model * llama_load_model_from_file( LLAMA_LOG_INFO("\n"); } } + return true; }; } - if (!llama_model_load(path_model, *model, params)) { - LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); + int status = llama_model_load(path_model, *model, params); + GGML_ASSERT(status <= 0); + if (status < 0) { + if (status == -1) { + LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); + } else if (status == -2) { + LLAMA_LOG_INFO("%s: cancelled model load\n", __func__); + } delete model; return nullptr; } diff --git a/llama.h b/llama.h index 0be4b1337..af76bae2d 100644 --- a/llama.h +++ b/llama.h @@ -127,7 +127,7 @@ extern "C" { bool sorted; } llama_token_data_array; - typedef void (*llama_progress_callback)(float progress, void *ctx); + typedef bool (*llama_progress_callback)(float progress, void *ctx); // Input data for llama_decode // A llama_batch object can contain input about one or many sequences @@ -180,7 +180,9 @@ extern "C" { int32_t main_gpu; // the GPU that is used for scratch and small tensors const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) - // called with a progress value between 0 and 1, pass NULL to disable + // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. + // If the provided progress_callback returns true, model loading continues. + // If it returns false, model loading is immediately aborted. 
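    // Editor's illustration (not part of this patch): a sketch of a caller-side
    // callback that cancels loading when an application flag is set. The
    // `app_state` type and field names below are hypothetical.
    //
    //     struct app_state { std::atomic<bool> abort_load{false}; };
    //
    //     static bool on_progress(float progress, void * ctx) {
    //         auto * st = (app_state *) ctx;
    //         fprintf(stderr, "\rloading: %3.0f%%", 100.0f * progress);
    //         return !st->abort_load.load(); // returning false aborts the load
    //     }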
llama_progress_callback progress_callback; // context pointer passed to the progress callback From 0137ef88ea9f8fd837a065700814329d24adeec3 Mon Sep 17 00:00:00 2001 From: bobqianic <129547291+bobqianic@users.noreply.github.com> Date: Fri, 22 Dec 2023 06:47:01 +0000 Subject: [PATCH 37/84] ggml : extend `enum ggml_log_level` with `GGML_LOG_LEVEL_DEBUG` (#4579) --- ggml.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml.h b/ggml.h index 75918502b..338f355a4 100644 --- a/ggml.h +++ b/ggml.h @@ -484,7 +484,8 @@ extern "C" { enum ggml_log_level { GGML_LOG_LEVEL_ERROR = 2, GGML_LOG_LEVEL_WARN = 3, - GGML_LOG_LEVEL_INFO = 4 + GGML_LOG_LEVEL_INFO = 4, + GGML_LOG_LEVEL_DEBUG = 5 }; // ggml object From 2bb98279c5a087d62949972b35cf63ff974ffe6a Mon Sep 17 00:00:00 2001 From: Deins Date: Fri, 22 Dec 2023 08:49:54 +0200 Subject: [PATCH 38/84] readme : add zig bindings (#4581) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 73fe59bb4..8e17d5ba4 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,7 @@ as the main playground for developing new features for the [ggml](https://github - Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj) - React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn) - Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp) +- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig) **UI:** From f31b98489824a86c937fa62ccf5dfd4bb0327b86 Mon Sep 17 00:00:00 2001 From: rhuddleston Date: Thu, 21 Dec 2023 23:56:34 -0700 Subject: [PATCH 39/84] ci : tag docker image with build number (#4584) --- .github/workflows/docker.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a7165a38f..7f4de50ea 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -69,6 +69,19 @@ jobs: docker-images: true swap-storage: true + - name: Determine tag name + id: tag + shell: bash + run: | + BUILD_NUMBER="$(git rev-list --count HEAD)" + SHORT_HASH="$(git rev-parse --short=7 HEAD)" + if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then + echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT + else + SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') + echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT + fi + - name: Build and push Docker image (versioned) if: github.event_name == 'push' uses: docker/build-push-action@v4 @@ -85,5 +98,5 @@ jobs: context: . 
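      # Editor's note (illustrative, not part of this patch): with the "Determine
      # tag name" step above, a push to master yields a name like "b1680", while a
      # branch build yields "my-branch-b1680-abc1234"; these values are made up.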
push: ${{ github.event_name == 'push' }} platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}" + tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}" , "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ steps.tag.outputs.name }}" file: ${{ matrix.config.dockerfile }} From 28cb35a0ecb9852adc3494aa51dde60141939d64 Mon Sep 17 00:00:00 2001 From: Michael Kesper Date: Fri, 22 Dec 2023 09:03:25 +0100 Subject: [PATCH 40/84] make : add LLAMA_HIP_UMA option (#4587) NB: LLAMA_HIP_UMA=1 (or any value) adds MK_CPPFLAG -DGGML_HIP_UMA --- Makefile | 3 +++ README.md | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 68df7702a..42686ce71 100644 --- a/Makefile +++ b/Makefile @@ -452,6 +452,9 @@ ifdef LLAMA_HIPBLAS LLAMA_CUDA_MMV_Y ?= 1 LLAMA_CUDA_KQUANTS_ITER ?= 2 MK_CPPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS +ifdef LLAMA_HIP_UMA + MK_CPPFLAGS += -DGGML_HIP_UMA +endif # LLAMA_HIP_UMA MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) diff --git a/README.md b/README.md index 8e17d5ba4..377d3928b 100644 --- a/README.md +++ b/README.md @@ -440,7 +440,13 @@ Building the program with BLAS support may lead to some performance improvements && cmake --build build -- -j 16 ``` On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON"`. - However, this hurts performance for non-integrated GPUs. + However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). 
+ + - Using `make` (example for target gfx1030, build with 16 CPU threads): + ```bash + make -j16 LLAMA_HIPBLAS=1 LLAMA_HIP_UMA=1 AMDGPU_TARGETS=gfx1030 + ``` + - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU): ```bash set PATH=%HIP_PATH%\bin;%PATH% From 48b24b170e3b4f9dc28200306840cb07d1c123df Mon Sep 17 00:00:00 2001 From: Herman Semenov Date: Fri, 22 Dec 2023 09:26:49 +0000 Subject: [PATCH 41/84] ggml : add comment about backward GGML_OP_DIAG_MASK_INF (#4203) --- ggml.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ggml.c b/ggml.c index f27920a2d..15e1984d1 100644 --- a/ggml.c +++ b/ggml.c @@ -15335,6 +15335,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor const int n_past = ((int32_t *) tensor->op_params)[0]; src0->grad = ggml_add_or_set(ctx, src0->grad, + /* ggml_diag_mask_inf_impl() shouldn't be here */ + /* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */ ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false), zero_table); } From 48b7ff193e64c97ab174280ba0eb8d14b47c49ba Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 22 Dec 2023 12:12:53 +0100 Subject: [PATCH 42/84] llama : fix platforms without mmap (#4578) * llama : fix platforms without mmap * win32 : limit prefetch size to the file size * fix win32 error clobber, unnecessary std::string in std::runtime_error --- ggml-cuda.cu | 3 ++- ggml.c | 6 ++++-- llama.cpp | 36 ++++++++++++++++++------------------ 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index ac91ee12e..37d7f2792 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7702,7 +7702,8 @@ inline void ggml_cuda_op_scale( GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); - const float scale = ((float *) dst->op_params)[0]; + float scale; + memcpy(&scale, dst->op_params, sizeof(float)); scale_f32_cuda(src0_dd, dst_dd, scale, ggml_nelements(src0), main_stream); CUDA_CHECK(cudaGetLastError()); diff --git a/ggml.c b/ggml.c index 15e1984d1..3656422d7 100644 --- a/ggml.c +++ b/ggml.c @@ -10335,7 +10335,8 @@ static void ggml_compute_forward_scale_f32( } // scale factor - const float v = *(float *) dst->op_params; + float v; + memcpy(&v, dst->op_params, sizeof(float)); const int ith = params->ith; const int nth = params->nth; @@ -15152,7 +15153,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { // necessary for llama if (src0->grad) { - const float s = ((float *) tensor->op_params)[0]; + float s; + memcpy(&s, tensor->op_params, sizeof(float)); src0->grad = ggml_add_or_set(ctx, diff --git a/llama.cpp b/llama.cpp index cb0546c95..4e4495739 100644 --- a/llama.cpp +++ b/llama.cpp @@ -778,7 +778,7 @@ struct llama_file { throw std::runtime_error(format("read error: %s", strerror(errno))); } if (ret != 1) { - throw std::runtime_error(std::string("unexpectedly reached end of file")); + throw std::runtime_error("unexpectedly reached end of file"); } } @@ -931,29 +931,29 @@ struct llama_mmap { #elif defined(_WIN32) static constexpr bool SUPPORTED = true; - llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { - (void) numa; + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) { + GGML_UNUSED(numa); size = file->size; HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); - DWORD error =
GetLastError(); if (hMapping == NULL) { + DWORD error = GetLastError(); throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); } addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); - error = GetLastError(); + DWORD error = GetLastError(); CloseHandle(hMapping); if (addr == NULL) { throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); } - if (prefetch) { + if (prefetch > 0) { // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG); HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll"); @@ -965,9 +965,9 @@ struct llama_mmap { // advise the kernel to preload the mapped memory WIN32_MEMORY_RANGE_ENTRY range; range.VirtualAddress = addr; - range.NumberOfBytes = (SIZE_T)size; + range.NumberOfBytes = (SIZE_T) std::min(size, prefetch); if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { - fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", + LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n", llama_format_win_err(GetLastError()).c_str()); } } @@ -982,26 +982,26 @@ struct llama_mmap { ~llama_mmap() { if (!UnmapViewOfFile(addr)) { - fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", + LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n", llama_format_win_err(GetLastError()).c_str()); } } #else static constexpr bool SUPPORTED = false; - llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { - (void) file; - (void) prefetch; - (void) numa; + llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) { + GGML_UNUSED(file); + GGML_UNUSED(prefetch); + GGML_UNUSED(numa); - throw std::runtime_error(std::string("mmap not supported")); + throw std::runtime_error("mmap not supported"); } - void unmap(size_t offset, size_t len) { - (void) offset; - (void) len; + void unmap_fragment(size_t first, size_t last) { + GGML_UNUSED(first); + GGML_UNUSED(last); - throw std::runtime_error(std::string("mmap not supported")); + throw std::runtime_error("mmap not supported"); } #endif }; From 6724ef16573ec7ecce620be56cbbff145856b2fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Forst=C3=A9n?= Date: Fri, 22 Dec 2023 15:34:05 +0200 Subject: [PATCH 43/84] Fix CudaMemcpy direction (#4599) --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 37d7f2792..da8fd1e09 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -8843,7 +8843,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s const cudaMemcpyKind src1_kind = src1->backend == GGML_BACKEND_CPU ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; const cudaMemcpyKind dst_kind = dst->backend == GGML_BACKEND_CPU ? 
- cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; + cudaMemcpyDeviceToHost : cudaMemcpyDeviceToDevice; for (int32_t row_id = 0; row_id < n_as; ++row_id) { const struct ggml_tensor * src0_row = dst->src[row_id + 2]; From a55876955b1a83464171de8d578d3ab062a7b62d Mon Sep 17 00:00:00 2001 From: FantasyGmm <16450052+FantasyGmm@users.noreply.github.com> Date: Fri, 22 Dec 2023 23:11:12 +0800 Subject: [PATCH 44/84] cuda : fix jetson compile error (#4560) * fix old jetson compile error * Update Makefile * update jetson detect and cuda version detect * update cuda marco define * update makefile and cuda,fix some issue * Update README.md Co-authored-by: Georgi Gerganov * Update Makefile * Update README.md --------- Co-authored-by: Georgi Gerganov --- Makefile | 22 +++++++++++++++++++--- README.md | 3 +++ ggml-cuda.cu | 7 +++++++ ggml-quants.c | 4 ++-- 4 files changed, 31 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 42686ce71..6a998091b 100644 --- a/Makefile +++ b/Makefile @@ -282,8 +282,17 @@ endif ifneq ($(filter aarch64%,$(UNAME_M)),) # Apple M1, M2, etc. # Raspberry Pi 3, 4, Zero 2 (64-bit) + # Nvidia Jetson MK_CFLAGS += -mcpu=native MK_CXXFLAGS += -mcpu=native + JETSON_RELEASE_INFO = $(shell jetson_release) + ifdef JETSON_RELEASE_INFO + ifneq ($(filter TX2%,$(JETSON_RELEASE_INFO)),) + JETSON_EOL_MODULE_DETECT = 1 + CC = aarch64-unknown-linux-gnu-gcc + cxx = aarch64-unknown-linux-gnu-g++ + endif + endif endif ifneq ($(filter armv6%,$(UNAME_M)),) @@ -357,10 +366,13 @@ ifdef LLAMA_BLIS endif # LLAMA_BLIS ifdef LLAMA_CUBLAS - MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include - MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib + MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include + MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib OBJS += ggml-cuda.o - MK_NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math + MK_NVCCFLAGS = -use_fast_math +ifndef JETSON_EOL_MODULE_DETECT + MK_NVCCFLAGS += --forward-unknown-to-host-compiler +endif # JETSON_EOL_MODULE_DETECT ifdef LLAMA_DEBUG MK_NVCCFLAGS += -lineinfo @@ -417,7 +429,11 @@ ifdef LLAMA_CUDA_CCBIN MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN) endif ggml-cuda.o: ggml-cuda.cu ggml-cuda.h +ifdef JETSON_EOL_MODULE_DETECT + $(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@ +else $(NVCC) $(BASE_CXXFLAGS) $(NVCCFLAGS) -Wno-pedantic -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@ +endif # JETSON_EOL_MODULE_DETECT endif # LLAMA_CUBLAS ifdef LLAMA_CLBLAST diff --git a/README.md b/README.md index 377d3928b..649c3b333 100644 --- a/README.md +++ b/README.md @@ -396,6 +396,9 @@ Building the program with BLAS support may lead to some performance improvements - #### cuBLAS This provides BLAS acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. 
`apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). + + For Jetson users: if you have a Jetson Orin, you can try this: [Official Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an older model (Nano/TX2), some additional steps are needed before compiling. + - Using `make`: ```bash make LLAMA_CUBLAS=1 diff --git a/ggml-cuda.cu b/ggml-cuda.cu index da8fd1e09..b124774a9 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -90,6 +90,13 @@ #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuda_fp16.h> +// CUDA 10.2 does not have these macro definitions. +#ifndef CUBLAS_TF32_TENSOR_OP_MATH +#define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH +#define CUBLAS_COMPUTE_16F CUDA_R_16F +#define CUBLAS_COMPUTE_32F CUDA_R_32F +#define cublasComputeType_t cudaDataType_t +#endif #endif // defined(GGML_USE_HIPBLAS) #include "ggml-cuda.h" diff --git a/ggml-quants.c b/ggml-quants.c index 0e8163a16..a15a24048 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -3677,7 +3677,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); - const ggml_int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}; + const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), @@ -6626,7 +6626,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); const int8x16_t scales = vld1q_s8(scale); - const ggml_int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}; + const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), From ba661751322a7c201fd3bef71af077c5aebfaa2a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 22 Dec 2023 17:53:43 +0200 Subject: [PATCH 45/84] sync : ggml (fix im2col) (#4591) * cuda : fix im2col_f32_f16 (ggml/#658) ggml-ci * ggml-alloc : fix ggml_tallocr_is_own --------- Co-authored-by: leejet --- ggml-alloc.c | 2 +- ggml-cuda.cu | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index a97436b17..a27dd54b0 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -72,7 +72,7 @@ static void remove_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * t // check if a tensor is allocated by this buffer static bool ggml_tallocr_is_own(ggml_tallocr_t alloc, const struct ggml_tensor * tensor) { - return tensor->buffer == alloc->buffer; + return tensor->buffer == alloc->buffer && (!tensor->view_src || tensor->view_src->buffer == alloc->buffer); } static bool ggml_is_view(struct ggml_tensor * t) { diff --git a/ggml-cuda.cu b/ggml-cuda.cu index b124774a9..7c2a834e3 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -5273,17 +5273,17 @@ static __global__ void im2col_f32_f16( const int ky = (i - kd) /
OW; const int ix = i % OW; - const int iiw = ix * s0 + kx * d0 - p0; - const int iih = blockIdx.y * s1 + ky * d1 - p1; + const int64_t iiw = ix * s0 + kx * d0 - p0; + const int64_t iih = blockIdx.y * s1 + ky * d1 - p1; - const int offset_dst = + const int64_t offset_dst = (blockIdx.y * OW + ix) * CHW + (blockIdx.z * (KW * KH) + ky * KW + kx); if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = __float2half(0.0f); } else { - const int offset_src = blockIdx.z * offset_delta; + const int64_t offset_src = blockIdx.z * offset_delta; dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]); } } From 7082d24cec35e9ce9147535a2224dfc67ee0a78c Mon Sep 17 00:00:00 2001 From: LeonEricsson <70749762+LeonEricsson@users.noreply.github.com> Date: Fri, 22 Dec 2023 17:05:56 +0100 Subject: [PATCH 46/84] lookup : add prompt lookup decoding example (#4484) * initial commit, going through initializations * main loop finished, starting to debug * BUG: generates gibberish/repeating tokens after a while * kv_cache management * Added colors to distinguish drafted tokens (--color). Updated README * lookup : fix token positions in the draft batch * lookup : use n_draft from CLI params * lookup : final touches --------- Co-authored-by: Leon Ericsson Co-authored-by: Georgi Gerganov --- .gitignore | 1 + Makefile | 5 +- common/common.h | 3 +- examples/CMakeLists.txt | 1 + examples/lookup/CMakeLists.txt | 5 + examples/lookup/README.md | 13 ++ examples/lookup/lookup.cpp | 230 +++++++++++++++++++++++++++++++++ 7 files changed, 256 insertions(+), 2 deletions(-) create mode 100644 examples/lookup/CMakeLists.txt create mode 100644 examples/lookup/README.md create mode 100644 examples/lookup/lookup.cpp diff --git a/.gitignore b/.gitignore index 76b3d2861..def74a1e9 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ models-mnt /llama-bench /llava-cli /lookahead +/lookup /main /metal /perplexity diff --git a/Makefile b/Makefile index 6a998091b..cb5a4e948 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ BUILD_TARGETS = \ main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \ simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \ - speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead tests/test-c.o + speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup tests/test-c.o # Binaries only useful for tests TEST_TARGETS = \ @@ -664,6 +664,9 @@ parallel: examples/parallel/parallel.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) lookahead: examples/lookahead/lookahead.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) +lookup: examples/lookup/lookup.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) + $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + ifdef LLAMA_METAL metal: examples/metal/metal.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) diff --git a/common/common.h b/common/common.h index e87ce1133..9659aa045 100644 --- a/common/common.h +++ b/common/common.h @@ -51,7 +51,7 @@ struct gpt_params { int32_t n_ctx = 512; // context size int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) int32_t n_keep = 0; // number of tokens to keep from initial prompt - int32_t n_draft = 16; // number of tokens to draft during speculative decoding + int32_t n_draft = 8; // number of tokens to draft during speculative decoding int32_t n_chunks = -1; 
// max number of chunks to process (-1 = unlimited) int32_t n_parallel = 1; // number of parallel sequences to decode int32_t n_sequences = 1; // number of sequences to decode @@ -240,3 +240,4 @@ void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80); // Dump the KV cache view showing individual sequences in each cell (long output). void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40); + diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 6744944fd..4cc13d6e9 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -33,6 +33,7 @@ else() add_subdirectory(simple) add_subdirectory(speculative) add_subdirectory(lookahead) + add_subdirectory(lookup) add_subdirectory(train-text-from-scratch) if (LLAMA_METAL) add_subdirectory(metal) diff --git a/examples/lookup/CMakeLists.txt b/examples/lookup/CMakeLists.txt new file mode 100644 index 000000000..c060b8f56 --- /dev/null +++ b/examples/lookup/CMakeLists.txt @@ -0,0 +1,5 @@ +set(TARGET lookup) +add_executable(${TARGET} lookup.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/lookup/README.md b/examples/lookup/README.md new file mode 100644 index 000000000..5bfb0de93 --- /dev/null +++ b/examples/lookup/README.md @@ -0,0 +1,13 @@ +# llama.cpp/examples/lookup + +Demonstration of Prompt Lookup Decoding + +https://github.com/apoorvumang/prompt-lookup-decoding + +The key parameters for lookup decoding are `ngram_min`, `ngram_max` and `n_draft`. The first two determine the size of the ngrams to search for in the prompt for a match. The latter specifies how many subsequent tokens to draft if a match is found. 
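As a rough sketch (an editor's illustration, not part of this patch; the actual implementation is in `lookup.cpp` below, and `llama_token` is aliased here just to keep the snippet self-contained), the drafting step looks like this:

```cpp
#include <vector>

using llama_token = int; // stand-in for the real typedef from llama.h

// find an earlier occurrence of the last ngram_size tokens of `inp` and
// propose up to n_draft tokens that followed it
static std::vector<llama_token> lookup_draft(
        const std::vector<llama_token> & inp, int ngram_size, int n_draft) {
    std::vector<llama_token> draft;
    const int n = (int) inp.size();
    for (int i = 0; i <= n - 2*ngram_size; ++i) {
        bool match = true;
        for (int j = 0; j < ngram_size; ++j) {
            if (inp[i + j] != inp[n - ngram_size + j]) {
                match = false;
                break;
            }
        }
        if (match) {
            // the tokens that followed the earlier occurrence become the draft
            for (int j = i + ngram_size; j < i + ngram_size + n_draft && j < n; ++j) {
                draft.push_back(inp[j]);
            }
            break;
        }
    }
    return draft;
}
```

In the example itself this search is repeated from `ngram_max` down to `ngram_min`, so longer matches are preferred.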
+ +More info: + +https://github.com/ggerganov/llama.cpp/pull/4484 +https://github.com/ggerganov/llama.cpp/issues/4226 + diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp new file mode 100644 index 000000000..d8de7dd38 --- /dev/null +++ b/examples/lookup/lookup.cpp @@ -0,0 +1,230 @@ +#include "common.h" +#include "llama.h" + +#include <cmath> +#include <cstdio> +#include <string> +#include <vector> + +int main(int argc, char ** argv){ + gpt_params params; + + if (!gpt_params_parse(argc, argv, params)) { + return 1; + } + + // max/min n-grams size to search for in prompt + const int ngram_max = 4; + const int ngram_min = 1; + + // length of the candidate / draft sequence, if match is found + const int n_draft = params.n_draft; + + const bool dump_kv_cache = params.dump_kv_cache; + +#ifndef LOG_DISABLE_LOGS + log_set_target(log_filename_generator("lookup", "log")); + LOG_TEE("Log start\n"); + log_dump_cmdline(argc, argv); +#endif // LOG_DISABLE_LOGS + + // init llama.cpp + llama_backend_init(params.numa); + + llama_model * model = NULL; + llama_context * ctx = NULL; + + // load the model + std::tie(model, ctx) = llama_init_from_gpt_params(params); + + // tokenize the prompt + const bool add_bos = llama_should_add_bos_token(model); + LOG("add_bos tgt: %d\n", add_bos); + + std::vector<llama_token> inp; + inp = ::llama_tokenize(ctx, params.prompt, add_bos, true); + + const int max_context_size = llama_n_ctx(ctx); + const int max_tokens_list_size = max_context_size - 4; + + if ((int) inp.size() > max_tokens_list_size) { + fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size); + return 1; + } + + fprintf(stderr, "\n\n"); + + for (auto id : inp) { + fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str()); + } + + fflush(stderr); + + const int n_input = inp.size(); + + const auto t_enc_start = ggml_time_us(); + + llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0)); + llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0)); + + const auto t_enc_end = ggml_time_us(); + + int n_predict = 0; + int n_drafted = 0; + int n_accept = 0; + + int n_past = inp.size(); + + bool has_eos = false; + + struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams); + + std::vector<llama_token> draft; + + llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, 1); + + // debug + struct llama_kv_cache_view kvc_view = llama_kv_cache_view_init(ctx, 1); + + const auto t_dec_start = ggml_time_us(); + + while (true) { + // debug + if (dump_kv_cache) { + llama_kv_cache_view_update(ctx, &kvc_view); + dump_kv_cache_view_seqs(kvc_view, 40); + } + + // print current draft sequence + LOG("drafted %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, draft).c_str()); + + int i_dft = 0; + while (true) { + // sample from the target model + llama_token id = llama_sampling_sample(ctx_sampling, ctx, NULL, i_dft); + + llama_sampling_accept(ctx_sampling, ctx, id, true); + + const std::string token_str = llama_token_to_piece(ctx, id); + + if (!params.use_color) { + printf("%s", token_str.c_str()); + } + + if (id == llama_token_eos(model)) { + has_eos = true; + } + + ++n_predict; + + // check if the target token matches the draft + if (i_dft < (int) draft.size() && id == draft[i_dft]) { + LOG("the sampled target token matches the %dth drafted token (%d, '%s') - accepted\n", i_dft, id, token_str.c_str()); + ++n_accept; + ++n_past; + ++i_dft; + inp.push_back(id); + + if (params.use_color) { + // color accepted draft token + printf("\033[34m%s\033[0m", token_str.c_str()); +
fflush(stdout); + } + continue; + } + + if (params.use_color) { + printf("%s", token_str.c_str()); + } + fflush(stdout); + + + LOG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", id, token_str.c_str()); + + draft.clear(); + draft.push_back(id); + inp.push_back(id); + break; + } + + if ((params.n_predict > 0 && n_predict > params.n_predict) || has_eos) { + break; + } + + // KV cache management + // clean the cache of draft tokens that weren't accepted + llama_kv_cache_seq_rm(ctx, 0, n_past, -1); + + llama_batch_clear(batch_tgt); + llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true); + + // generate n_pred tokens through prompt lookup + auto prompt_lookup = [&]() -> void { + int inp_size = inp.size(); + for (int ngram_size = ngram_max ; ngram_size > ngram_min; --ngram_size){ + const llama_token * ngram = &inp[inp_size - ngram_size]; + + for (int i = 0; i <= (int) inp_size - (ngram_size * 2); ++i) { + bool match = true; + for (int j = 0; j < ngram_size; ++j) { + if (inp[i + j] != ngram[j]) { + match = false; + break; + } + } + + if (match) { + const int startIdx = i + ngram_size; + const int endIdx = startIdx + n_draft; + if (endIdx < inp_size) { + for (int j = startIdx; j < endIdx; ++j) { + LOG(" - draft candidate %d: %d\n", j, inp[j]); + draft.push_back(inp[j]); + llama_batch_add(batch_tgt, inp[j], n_past + (j - startIdx) + 1, { 0 }, true); + ++n_drafted; + } + return; + } + } + } + } + return; + }; + + prompt_lookup(); + + llama_decode(ctx, batch_tgt); + ++n_past; + + draft.erase(draft.begin()); + } + + auto t_dec_end = ggml_time_us(); + + LOG_TEE("\n\n"); + + LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f)); + LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f)); + + LOG_TEE("\n"); + LOG_TEE("n_draft = %d\n", n_draft); + LOG_TEE("n_predict = %d\n", n_predict); + LOG_TEE("n_drafted = %d\n", n_drafted); + LOG_TEE("n_accept = %d\n", n_accept); + LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted); + + LOG_TEE("\ntarget:\n"); + llama_print_timings(ctx); + + llama_sampling_free(ctx_sampling); + llama_batch_free(batch_tgt); + + llama_free(ctx); + llama_free_model(model); + + llama_backend_free(); + + fprintf(stderr, "\n\n"); + + return 0; +} From e0a4002273907b2c414b6b5442d99e08bfe2df35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Sat, 23 Dec 2023 09:16:33 +0100 Subject: [PATCH 47/84] CUDA: fixed row rounding for 0 tensor splits (#4594) --- ggml-cuda.cu | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 7c2a834e3..490081cac 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7937,12 +7937,16 @@ static void ggml_cuda_op_mul_mat( if (id != 0) { row_low[id] = ne01*g_tensor_split[id]; - row_low[id] -= row_low[id] % rounding; + if (row_low[id] < ne01) { + row_low[id] -= row_low[id] % rounding; + } } if (id != g_device_count - 1) { row_high[id] = ne01*g_tensor_split[id + 1]; - row_high[id] -= row_high[id] % rounding; + if (row_high[id] < ne01) { + row_high[id] -= row_high[id] % rounding; + } } } } From b9ec82d262cb20d7f0a8a1157bfa9aace40e2625 Mon Sep 17 00:00:00 2001 From: kalomaze <66376113+kalomaze@users.noreply.github.com> Date: Sat, 23 Dec 2023 03:27:07 -0600 Subject: [PATCH 48/84] grammar : check the full vocab only if necessary (opt) (#4306) * 
Check the full vocab for grammar only if necessary * Fix missing logit restoration step (?) Does this matter, actually? * Fix whitespace / formatting * Adjust comment * Didn't mean to push test gbnf * Split sampling into the helper function (?) And also revert the changes made to the header * common : fix final newline --------- Co-authored-by: Georgi Gerganov --- common/sampling.cpp | 48 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/common/sampling.cpp b/common/sampling.cpp index f4e76df31..5b15204be 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -149,11 +149,12 @@ static void sampler_queue( } } -llama_token llama_sampling_sample( +static llama_token llama_sampling_sample_impl( struct llama_sampling_context * ctx_sampling, struct llama_context * ctx_main, struct llama_context * ctx_cfg, - const int idx) { + const int idx, + bool is_resampling) { // Add a parameter to indicate if we are resampling const llama_sampling_params & params = ctx_sampling->params; const int n_vocab = llama_n_vocab(llama_get_model(ctx_main)); @@ -173,8 +174,17 @@ llama_token llama_sampling_sample( llama_token id = 0; + // Get a pointer to the logits float * logits = llama_get_logits_ith(ctx_main, idx); + // Declare original_logits at the beginning of the function scope + std::vector<float> original_logits; + + if (!is_resampling) { + // Only make a copy of the original logits if we are not in the resampling phase, not sure if I actually have to do this. + original_logits = std::vector<float>(logits, logits + llama_n_vocab(llama_get_model(ctx_main))); + } + // apply params.logit_bias map for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) { logits[it->first] += it->second; @@ -210,7 +220,8 @@ llama_token llama_sampling_sample( } } - if (ctx_sampling->grammar != NULL) { + // If we are in the resampling phase, apply grammar checks before sampling logic + if (is_resampling && ctx_sampling->grammar != NULL) { llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar); } @@ -252,9 +263,40 @@ llama_token llama_sampling_sample( } } + if (ctx_sampling->grammar != NULL && !is_resampling) { + // Create an array with a single token data element for the sampled id + llama_token_data single_token_data = {id, logits[id], 0.0f}; + llama_token_data_array single_token_data_array = { &single_token_data, 1, false }; + + // Apply grammar constraints to the single token + llama_sample_grammar(ctx_main, &single_token_data_array, ctx_sampling->grammar); + + // Check if the token is valid according to the grammar by seeing if its logit has been set to -INFINITY + bool is_valid = single_token_data_array.data[0].logit != -INFINITY; + + // If the token is not valid according to the grammar, perform resampling + if (!is_valid) { + LOG("Resampling because token %d: '%s' does not meet grammar rules\n", id, llama_token_to_piece(ctx_main, id).c_str()); + + // Restore logits from the copy + std::copy(original_logits.begin(), original_logits.end(), logits); + + return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, true); // Pass true for is_resampling + } + } + return id; } +llama_token llama_sampling_sample( + struct llama_sampling_context * ctx_sampling, + struct llama_context * ctx_main, + struct llama_context * ctx_cfg, + const int idx) { + // Call the implementation function with is_resampling set to false by default + return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false); +} + void llama_sampling_accept(
struct llama_sampling_context * ctx_sampling, struct llama_context * ctx_main, From 6123979952385847d8348e295d77d6e01da8aa84 Mon Sep 17 00:00:00 2001 From: Alexey Parfenov Date: Sat, 23 Dec 2023 09:31:49 +0000 Subject: [PATCH 49/84] server : allow to specify custom prompt for penalty calculation (#3727) --- common/sampling.cpp | 8 ++++--- common/sampling.h | 3 +++ examples/server/README.md | 2 ++ examples/server/server.cpp | 44 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/common/sampling.cpp b/common/sampling.cpp index 5b15204be..8e45909f1 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -203,12 +203,14 @@ static llama_token llama_sampling_sample_impl( } // apply penalties - if (!prev.empty()) { + const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev; + const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n); + if (penalty_tokens_used_size) { const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))]; llama_sample_repetition_penalties(ctx_main, &cur_p, - prev.data() + prev.size() - penalty_last_n, - penalty_last_n, penalty_repeat, penalty_freq, penalty_present); + penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size, + penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present); if (!penalize_nl) { for (size_t idx = 0; idx < cur_p.size; idx++) { diff --git a/common/sampling.h b/common/sampling.h index fdfa9eed1..f16ef97e3 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -36,6 +36,9 @@ typedef struct llama_sampling_params { float cfg_scale = 1.f; // how strong is guidance std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens + + std::vector<llama_token> penalty_prompt_tokens; + bool use_penalty_prompt_tokens = false; } llama_sampling_params; // general sampler context diff --git a/examples/server/README.md b/examples/server/README.md index 0751b9612..f1e586a1c 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -148,6 +148,8 @@ node index.js `frequency_penalty`: Repeat alpha frequency penalty (default: 0.0, 0.0 = disabled); + `penalty_prompt`: This will replace the `prompt` for the purpose of the penalty evaluation. Can be either `null`, a string or an array of numbers representing tokens (default: `null` = use the original `prompt`). + `mirostat`: Enable Mirostat sampling, controlling perplexity during text generation (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0). `mirostat_tau`: Set the Mirostat target entropy, parameter tau (default: 5.0).
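As an illustration (editor's example, not part of this patch): sending `{"prompt": "Hello", "repeat_penalty": 1.2, "penalty_prompt": [42, 13, 7]}` to `/completion` would compute the repetition penalties against the token IDs `[42, 13, 7]` (plus any tokens generated afterwards) instead of the original prompt; the token values here are made up.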
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 04038530f..72dfe452c 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -761,6 +761,42 @@ struct llama_server_context slot->prompt = ""; } + slot->sparams.penalty_prompt_tokens.clear(); + slot->sparams.use_penalty_prompt_tokens = false; + const auto &penalty_prompt = data.find("penalty_prompt"); + if (penalty_prompt != data.end()) + { + if (penalty_prompt->is_string()) + { + const auto penalty_prompt_string = penalty_prompt->get<std::string>(); + auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false); + slot->sparams.penalty_prompt_tokens.swap(penalty_tokens); + if (slot->params.n_predict > 0) + { + slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict); + } + slot->sparams.use_penalty_prompt_tokens = true; + } + else if (penalty_prompt->is_array()) + { + const auto n_tokens = penalty_prompt->size(); + slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict)); + const int n_vocab = llama_n_vocab(model); + for (const auto &penalty_token : *penalty_prompt) + { + if (penalty_token.is_number_integer()) + { + const auto tok = penalty_token.get<llama_token>(); + if (tok >= 0 && tok < n_vocab) + { + slot->sparams.penalty_prompt_tokens.push_back(tok); + } + } + } + slot->sparams.use_penalty_prompt_tokens = true; + } + } + slot->sparams.logit_bias.clear(); if (json_value(data, "ignore_eos", false)) @@ -992,6 +1028,12 @@ struct llama_server_context slot.generated_text += token_str; slot.has_next_token = true; + if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1) + { + // we can change penalty_prompt_tokens because it is always created from scratch each request + slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok); + } + // check if there is incomplete UTF-8 character at the end bool incomplete = false; for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) @@ -1183,6 +1225,8 @@ struct llama_server_context {"repeat_penalty", slot.sparams.penalty_repeat}, {"presence_penalty", slot.sparams.penalty_present}, {"frequency_penalty", slot.sparams.penalty_freq}, + {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens}, + {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens}, {"mirostat", slot.sparams.mirostat}, {"mirostat_tau", slot.sparams.mirostat_tau}, {"mirostat_eta", slot.sparams.mirostat_eta}, From 925e5584a058afb612f9c20bc472c130f5d0f891 Mon Sep 17 00:00:00 2001 From: Samuel Maynard Date: Sat, 23 Dec 2023 11:35:55 +0200 Subject: [PATCH 50/84] ci(docker): fix tags in "Build and push docker image (tagged)" (#4603) --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 7f4de50ea..87904b75e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -98,5 +98,5 @@ jobs: context: .
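      # Editor's note (illustrative, not part of this patch): build-push-action
      # expects `tags` as a single comma- or newline-separated string, which the
      # corrected value below provides.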
push: ${{ github.event_name == 'push' }} platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}" , "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ steps.tag.outputs.name }}" + tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }},ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ steps.tag.outputs.name }}" file: ${{ matrix.config.dockerfile }} From 708e179e8562c2604240df95a2241dea17fd808b Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 16:10:51 +0100 Subject: [PATCH 51/84] fallback to CPU buffer if host buffer alloc fails (#4610) --- ggml-cuda.cu | 11 ++++++----- llama.cpp | 16 +++++++++++----- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 490081cac..f9830328b 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6729,8 +6729,7 @@ void * ggml_cuda_host_malloc(size_t size) { void * ptr = nullptr; cudaError_t err = cudaMallocHost((void **) &ptr, size); if (err != cudaSuccess) { - // The allocation error can be bypassed. A null ptr will assigned out of this function. - // This can fixed the OOM error in WSL. + // clear the error cudaGetLastError(); fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n", size/1024.0/1024.0, cudaGetErrorString(err)); @@ -9674,12 +9673,14 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { // host buffer type static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { - CUDA_CHECK(cudaFreeHost(buffer->context)); + ggml_cuda_host_free(buffer->context); } static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - void * ptr; - CUDA_CHECK(cudaMallocHost(&ptr, size)); + void * ptr = ggml_cuda_host_malloc(size); + if (ptr == nullptr) { + return nullptr; + } // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); diff --git a/llama.cpp b/llama.cpp index 4e4495739..5699a0fcf 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1177,21 +1177,27 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_ } static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) { + ggml_backend_buffer_type_t buft = nullptr; + #ifdef GGML_USE_METAL if (n_gpu_layers > 0) { - return ggml_backend_metal_buffer_type(); + buft = ggml_backend_metal_buffer_type(); } #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) if (n_gpu_layers > 0) { - return ggml_backend_cuda_buffer_type(0); + buft = ggml_backend_cuda_buffer_type(0); } #elif defined(GGML_USE_CUBLAS) - return ggml_backend_cuda_host_buffer_type(); + buft = ggml_backend_cuda_host_buffer_type(); #elif defined(GGML_USE_CPU_HBM) - return ggml_backend_cpu_hbm_buffer_type(); + buft = ggml_backend_cpu_hbm_buffer_type(); #endif - return ggml_backend_cpu_buffer_type(); + if (buft == nullptr) { + buft = ggml_backend_cpu_buffer_type(); + } + + return buft; GGML_UNUSED(n_gpu_layers); } From 5bf3953d7e9831ea22b0bc017ce97409b801ccf1 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 14:34:22 +0100 Subject: [PATCH 52/84] cuda : improve cuda pool efficiency using virtual memory (#4606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cuda : improve cuda pool efficiency using virtual memory * fix mixtral * fix 
cmake build * check for vmm support, disable for hip ggml-ci * fix hip build * clarify granularity * move all caps to g_device_caps * refactor error checking * add cuda_pool_alloc, refactor most pool allocations ggml-ci * fix hip build * CUBLAS_TF32_TENSOR_OP_MATH is not a macro * more hip crap * llama : fix msvc warnings * ggml : fix msvc warnings * minor * minor * cuda : fallback to CPU on host buffer alloc fail * Update ggml-cuda.cu Co-authored-by: Johannes Gäßler * Update ggml-cuda.cu Co-authored-by: Johannes Gäßler * ensure allocations are always aligned * act_size -> actual_size --------- Co-authored-by: Johannes Gäßler --- CMakeLists.txt | 2 + Makefile | 6 +- ggml-backend.c | 16 +- ggml-cuda.cu | 499 +++++++++++++++++++++++++++---------------- ggml.c | 2 +- ggml.h | 2 + llama.cpp | 6 +- tests/test-grad0.cpp | 3 - 8 files changed, 328 insertions(+), 208 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6fc6508c5..545aab267 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -302,6 +302,8 @@ if (LLAMA_CUBLAS) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt) endif() + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) + if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES) # 52 == lowest CUDA 12 standard # 60 == f16 CUDA intrinsics diff --git a/Makefile b/Makefile index cb5a4e948..28c6d79bc 100644 --- a/Makefile +++ b/Makefile @@ -367,17 +367,15 @@ endif # LLAMA_BLIS ifdef LLAMA_CUBLAS MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include - MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib + MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib OBJS += ggml-cuda.o MK_NVCCFLAGS = -use_fast_math ifndef JETSON_EOL_MODULE_DETECT MK_NVCCFLAGS += --forward-unknown-to-host-compiler endif # JETSON_EOL_MODULE_DETECT - ifdef LLAMA_DEBUG MK_NVCCFLAGS += -lineinfo -endif - +endif # LLAMA_DEBUG ifdef LLAMA_CUDA_NVCC NVCC = $(LLAMA_CUDA_NVCC) else diff --git a/ggml-backend.c b/ggml-backend.c index 0c8c9ec43..526ce732b 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -297,7 +297,7 @@ static void ggml_backend_registry_init(void) { void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) { GGML_ASSERT(ggml_backend_registry_count < GGML_MAX_BACKENDS_REG); - int id = ggml_backend_registry_count; + size_t id = ggml_backend_registry_count; ggml_backend_registry[id] = (struct ggml_backend_reg) { /* .name = */ {0}, @@ -330,6 +330,8 @@ size_t ggml_backend_reg_find_by_name(const char * name) { return i; } } + + // not found return SIZE_MAX; } @@ -340,15 +342,15 @@ ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str) const char * params = strchr(backend_str, ':'); char backend_name[128]; if (params == NULL) { - strcpy(backend_name, backend_str); + snprintf(backend_name, sizeof(backend_name), "%s", backend_str); params = ""; } else { - strncpy(backend_name, backend_str, params - backend_str); - backend_name[params - backend_str] = '\0'; + snprintf(backend_name, sizeof(backend_name), "%.*s", (int)(params - backend_str), 
backend_str); params++; } size_t backend_i = ggml_backend_reg_find_by_name(backend_name); + if (backend_i == SIZE_MAX) { fprintf(stderr, "%s: backend %s not found\n", __func__, backend_name); return NULL; @@ -396,18 +398,12 @@ static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { } static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy((char *)tensor->data + offset, data, size); GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy(data, (const char *)tensor->data + offset, size); GGML_UNUSED(buffer); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f9830328b..ac3b3c14d 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -86,17 +86,28 @@ #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess #define __trap abort +#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS +#define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED +#define CUBLAS_STATUS_ALLOC_FAILED HIPBLAS_STATUS_ALLOC_FAILED +#define CUBLAS_STATUS_INVALID_VALUE HIPBLAS_STATUS_INVALID_VALUE +#define CUBLAS_STATUS_ARCH_MISMATCH HIPBLAS_STATUS_ARCH_MISMATCH +#define CUBLAS_STATUS_MAPPING_ERROR HIPBLAS_STATUS_MAPPING_ERROR +#define CUBLAS_STATUS_EXECUTION_FAILED HIPBLAS_STATUS_EXECUTION_FAILED +#define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR +#define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED #else #include <cuda_runtime.h> +#include <cuda.h> #include <cublas_v2.h> #include <cuda_fp16.h> -// CUDA 10.2 does not have these macro definitions.
-#ifndef CUBLAS_TF32_TENSOR_OP_MATH + +#if CUDART_VERSION < 11020 #define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH #define CUBLAS_COMPUTE_16F CUDA_R_16F #define CUBLAS_COMPUTE_32F CUDA_R_32F #define cublasComputeType_t cudaDataType_t -#endif +#endif // CUDART_VERSION < 11020 + #endif // defined(GGML_USE_HIPBLAS) #include "ggml-cuda.h" @@ -200,45 +211,45 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); -#define CUDA_CHECK(err) \ - do { \ - cudaError_t err_ = (err); \ - if (err_ != cudaSuccess) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ - cudaGetErrorString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"CUDA error"); \ - } \ - } while (0) - #if CUDART_VERSION >= 12000 -#define CUBLAS_CHECK(err) \ - do { \ - cublasStatus_t err_ = (err); \ - if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \ - err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"cuBLAS error"); \ - } \ - } while (0) + static const char * cublas_get_error_str(const cublasStatus_t err) { + return cublasGetStatusString(err); + } #else -#define CUBLAS_CHECK(err) \ - do { \ - cublasStatus_t err_ = (err); \ - if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"cuBLAS error"); \ - } \ - } while (0) -#endif // CUDART_VERSION >= 11 + static const char * cublas_get_error_str(const cublasStatus_t err) { + switch (err) { + case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; + default: return "unknown error"; + } + } +#endif // CUDART_VERSION >= 12000 + +[[noreturn]] +static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { + fprintf(stderr, "CUDA error: %s: %s\n", stmt, msg); + fprintf(stderr, " in function %s at %s:%d\n", func, file, line); + GGML_ASSERT(!"CUDA error"); +} + +#define CUDA_CHECK(err) do { auto err_ = (err); if (err_ != cudaSuccess) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cudaGetErrorString(err_)); } while (0) +#define CUBLAS_CHECK(err) do { auto err_ = (err); if (err_ != CUBLAS_STATUS_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cublas_get_error_str(err_)); } while (0) + +#if !defined(GGML_USE_HIPBLAS) +static const char * cu_get_error_str(CUresult err) { + const char * err_str; + cuGetErrorString(err, &err_str); + return err_str; +} +#define CU_CHECK(err) do { auto err_ = (err); if (err_ != CUDA_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cu_get_error_str(err_)); } while (0) +#endif #if 
CUDART_VERSION >= 11100 #define GGML_CUDA_ASSUME(x) __builtin_assume(x) @@ -516,9 +527,17 @@ inline cudaError_t ggml_cuda_set_device(const int device) { static int g_device_count = -1; static int g_main_device = 0; -static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES]; static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; +struct cuda_device_capabilities { + int cc; // compute capability + bool vmm; // virtual memory support + size_t vmm_granularity; // granularity of virtual memory +}; + +static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; + + static void * g_scratch_buffer = nullptr; static size_t g_scratch_size = 0; // disabled by default static size_t g_scratch_offset = 0; @@ -5875,7 +5894,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -5920,7 +5939,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -5965,7 +5984,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6010,7 +6029,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6055,7 +6074,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6100,7 +6119,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6147,7 +6166,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6193,7 +6212,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6238,7 +6257,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6283,7 +6302,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= 
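// Usage sketch for the error-checking helpers introduced above (illustrative
// calls, not part of the patch): each macro evaluates its argument once and
// routes any failure through ggml_cuda_error() with function/file/line context.
//   CUDA_CHECK(cudaMalloc((void **) &ptr, size));             // runtime API
//   CUBLAS_CHECK(cublasSetStream(handle, stream));            // cuBLAS API
//   CU_CHECK(cuMemAddressReserve(&addr, max_size, 0, 0, 0));  // driver API, non-HIP builds only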
CC_RDNA2) { @@ -6543,21 +6562,24 @@ struct scoped_spin_lock { scoped_spin_lock& operator=(const scoped_spin_lock&) = delete; }; +static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; + +// #define DEBUG_CUDA_MALLOC struct cuda_buffer { void * ptr = nullptr; size_t size = 0; }; static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; -static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; +static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; -static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { +static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); #ifdef DEBUG_CUDA_MALLOC int nnz = 0; - size_t max_size = 0, tot_size = 0; + size_t max_size = 0; #endif size_t best_diff = 1ull << 36; int ibest = -1; @@ -6566,7 +6588,6 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { if (b.ptr != nullptr) { #ifdef DEBUG_CUDA_MALLOC ++nnz; - tot_size += b.size; if (b.size > max_size) max_size = b.size; #endif if (b.size >= size) { @@ -6593,19 +6614,20 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { b.size = 0; return ptr; } -#ifdef DEBUG_CUDA_MALLOC - fprintf(stderr, "%s: %d buffers, max_size = %u MB, tot_size = %u MB, requested %u MB\n", __func__, nnz, - (uint32_t)(max_size/1024/1024), (uint32_t)(tot_size/1024/1024), (uint32_t)(size/1024/1024)); -#endif void * ptr; size_t look_ahead_size = (size_t) (1.05 * size); look_ahead_size = 256 * ((look_ahead_size + 255)/256); CUDA_CHECK(cudaMalloc((void **) &ptr, look_ahead_size)); *actual_size = look_ahead_size; + g_cuda_pool_size[id] += look_ahead_size; +#ifdef DEBUG_CUDA_MALLOC + fprintf(stderr, "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, id, nnz, + (uint32_t)(max_size/1024/1024), (uint32_t)(g_cuda_pool_size[id]/1024/1024), (uint32_t)(size/1024/1024)); +#endif return ptr; } -static void ggml_cuda_pool_free(void * ptr, size_t size) { +static void ggml_cuda_pool_free_leg(void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); @@ -6620,8 +6642,152 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { } fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n"); CUDA_CHECK(cudaFree(ptr)); + g_cuda_pool_size[id] -= size; } +#if !defined(GGML_USE_HIPBLAS) +// pool with virtual memory +static std::vector<CUmemGenericAllocationHandle> g_cuda_pool_handles[GGML_CUDA_MAX_DEVICES]; +static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0}; +static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0}; +static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 36; // 64 GB + +static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { + scoped_spin_lock lock(g_cuda_pool_lock); + int id; + CUDA_CHECK(cudaGetDevice(&id)); + + // round up the allocation size to the alignment to ensure that all allocations are aligned for all data types + const size_t alignment = 128; + size = alignment * ((size + alignment - 1) / alignment); + + size_t avail = g_cuda_pool_size[id] - g_cuda_pool_used[id]; + + if (size > avail) { + // round up to the next multiple of the granularity + size_t reserve_size = size - avail; + const size_t granularity = g_device_caps[id].vmm_granularity; + reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); + + GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); + + // allocate more physical
memory + CUmemAllocationProp prop = {}; + prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; + prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + prop.location.id = id; + CUmemGenericAllocationHandle handle; + CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); + + // reserve virtual address space (if not already reserved) + if (g_cuda_pool_addr[id] == 0) { + CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[id], CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); + } + + // map at the end of the pool + CU_CHECK(cuMemMap(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, 0, handle, 0)); + + // set access + CUmemAccessDesc access = {}; + access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + access.location.id = id; + access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; + CU_CHECK(cuMemSetAccess(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, &access, 1)); + + // add to the pool + g_cuda_pool_handles[id].push_back(handle); + g_cuda_pool_size[id] += reserve_size; + + //printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB)\n", + // id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), + // (unsigned long long) (reserve_size/1024/1024)); + } + + GGML_ASSERT(g_cuda_pool_addr[id] != 0); + + void * ptr = (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id]); + *actual_size = size; + g_cuda_pool_used[id] += size; + +#ifdef DEBUG_CUDA_MALLOC + printf("cuda pool[%d]: allocated %llu bytes at %llx\n", id, (unsigned long long) size, ptr); +#endif + + return ptr; +} + +static void ggml_cuda_pool_free_vmm(void * ptr, size_t size) { + scoped_spin_lock lock(g_cuda_pool_lock); + int id; + CUDA_CHECK(cudaGetDevice(&id)); + +#ifdef DEBUG_CUDA_MALLOC + printf("cuda pool[%d]: freed %llu bytes at %llx\n", id, (unsigned long long) size, ptr); +#endif + + g_cuda_pool_used[id] -= size; + + // all deallocations must be in reverse order of the allocations + GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id])); +} + +static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { + int id; + CUDA_CHECK(cudaGetDevice(&id)); + if (g_device_caps[id].vmm) { + return ggml_cuda_pool_malloc_vmm(size, actual_size); + } else { + return ggml_cuda_pool_malloc_leg(size, actual_size); + } +} + +static void ggml_cuda_pool_free(void * ptr, size_t size) { + int id; + CUDA_CHECK(cudaGetDevice(&id)); + if (g_device_caps[id].vmm) { + ggml_cuda_pool_free_vmm(ptr, size); + } else { + ggml_cuda_pool_free_leg(ptr, size); + } +} +#else +#define ggml_cuda_pool_malloc ggml_cuda_pool_malloc_leg +#define ggml_cuda_pool_free ggml_cuda_pool_free_leg +#endif // !defined(GGML_USE_HIPBLAS) + +template<typename T> +struct cuda_pool_alloc { + T * ptr = nullptr; + size_t actual_size = 0; + + // size is in number of elements + T * alloc(size_t size) { + GGML_ASSERT(ptr == nullptr); + ptr = (T *) ggml_cuda_pool_malloc(size * sizeof(T), &this->actual_size); + return ptr; + } + + cuda_pool_alloc(size_t size) { + alloc(size); + } + + ~cuda_pool_alloc() { + if (ptr != nullptr) { + ggml_cuda_pool_free(ptr, actual_size); + } + } + + T * get() { + return ptr; + } + + cuda_pool_alloc() = default; + cuda_pool_alloc(const cuda_pool_alloc &) = delete; + cuda_pool_alloc(cuda_pool_alloc &&) = delete; + cuda_pool_alloc& operator=(const cuda_pool_alloc &) = delete; + cuda_pool_alloc& operator=(cuda_pool_alloc &&) = delete; +}; + static bool g_cublas_loaded = false; bool ggml_cublas_loaded(void) { @@ -6660,16 +6826,33 @@ void ggml_init_cublas() { #endif fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__,
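// A minimal usage sketch for the cuda_pool_alloc<T> RAII wrapper defined above
// (hypothetical values; to_fp16_cuda stands in for any pool user):
//   {
//       cuda_pool_alloc<half> tmp(n);    // n is an element count, not bytes
//       to_fp16_cuda(src_f32, tmp.get(), n, stream);
//       ...                              // work with tmp.get()
//   }                                    // freed here, in reverse order of allocation,
//                                        // which is exactly what the VMM pool asserts
// e.g. assuming a 2 MiB granularity, a 5 MiB request grows the physical pool by 6 MiB.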
g_device_count); for (int id = 0; id < g_device_count; ++id) { + int device_vmm = 0; + +#if !defined(GGML_USE_HIPBLAS) + CUdevice device; + CU_CHECK(cuDeviceGet(&device, id)); + CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); + + if (device_vmm) { + CUmemAllocationProp alloc_prop = {}; + alloc_prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; + alloc_prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + alloc_prop.location.id = id; + CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); + } +#endif // !defined(GGML_USE_HIPBLAS) + g_device_caps[id].vmm = !!device_vmm; + cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); - fprintf(stderr, " Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor); + fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) - g_compute_capabilities[id] = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; + g_device_caps[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; #else - g_compute_capabilities[id] = 100*prop.major + 10*prop.minor; + g_device_caps[id].cc = 100*prop.major + 10*prop.minor; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } for (int id = 0; id < g_device_count; ++id) { @@ -7178,11 +7361,11 @@ static int64_t get_row_rounding(ggml_type type) { int64_t max_compute_capability = INT_MIN; for (int64_t id = 0; id < g_device_count; ++id) { if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { - if (min_compute_capability > g_compute_capabilities[id]) { - min_compute_capability = g_compute_capabilities[id]; + if (min_compute_capability > g_device_caps[id].cc) { + min_compute_capability = g_device_caps[id].cc; } - if (max_compute_capability < g_compute_capabilities[id]) { - max_compute_capability = g_compute_capabilities[id]; + if (max_compute_capability < g_device_caps[id].cc) { + max_compute_capability = g_device_caps[id].cc; } } } @@ -7297,8 +7480,8 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics #ifdef GGML_CUDA_F16 - size_t ash; - dfloat * src1_dfloat = nullptr; // dfloat == half + cuda_pool_alloc<half> src1_dfloat_a; + half * src1_dfloat = nullptr; // dfloat == half bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 || @@ -7306,7 +7489,7 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16; if (src1_convert_f16) { - src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash); + src1_dfloat = src1_dfloat_a.alloc(ne00); ggml_cpy_f32_f16_cuda((const char *) src1_ddf_i, (char *) src1_dfloat, ne00, ne00, 1, sizeof(float), 0, 0, ne00, 1, sizeof(half), 0, 0, stream); @@ -7354,12 +7537,6 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( break; } -#ifdef GGML_CUDA_F16 - if (src1_convert_f16) { - ggml_cuda_pool_free(src1_dfloat, ash); - } -#endif // GGML_CUDA_F16 - (void) src1; (void) dst; (void) src1_ddq_i; @@ -7390,33 +7567,30 @@ inline void ggml_cuda_op_mul_mat_cublas( // ldc == nrows of the matrix that cuBLAS writes into int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? 
ne0 : row_diff; - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 - half * src0_as_f16 = nullptr; - size_t src0_as = 0; + cuda_pool_alloc<half> src0_as_f16; if (src0->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = row_diff*ne00; - src0_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src0_as); - to_fp16_cuda(src0_dd_i, src0_as_f16, ne, stream); + src0_as_f16.alloc(ne); + to_fp16_cuda(src0_dd_i, src0_as_f16.get(), ne, stream); } - const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16; + const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16.get(); - half * src1_as_f16 = nullptr; - size_t src1_as = 0; + cuda_pool_alloc<half> src1_as_f16; if (src1->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = src1_ncols*ne10; - src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); - to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); + src1_as_f16.alloc(ne); + to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream); } - const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16; - size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); + const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get(); + cuda_pool_alloc<half> dst_f16(row_diff*src1_ncols); const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; @@ -7425,36 +7599,25 @@ inline void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK( cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, - &alpha_f16, src0_ptr, CUDA_R_16F, ne00, - src1_ptr, CUDA_R_16F, ne10, - &beta_f16, dst_f16, CUDA_R_16F, ldc, + &alpha_f16, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream); - - ggml_cuda_pool_free(dst_f16, dst_as); - - if (src0_as != 0) { - ggml_cuda_pool_free(src0_as_f16, src0_as); - } - - if (src1_as != 0) { - ggml_cuda_pool_free(src1_as_f16, src1_as); - } + to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); } else { - float * src0_ddq_as_f32 = nullptr; - size_t src0_as = 0; + cuda_pool_alloc<float> src0_ddq_as_f32; if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); GGML_ASSERT(to_fp32_cuda != nullptr); - src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT - to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); + src0_ddq_as_f32.alloc(row_diff*ne00); + to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); } - const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32; + const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? 
(const float *) src0_dd_i : src0_ddq_as_f32.get(); const float alpha = 1.0f; const float beta = 0.0f; @@ -7466,10 +7629,6 @@ inline void ggml_cuda_op_mul_mat_cublas( &alpha, src0_ddf_i, ne00, src1_ddf_i, ne10, &beta, dst_dd_i, ldc)); - - if (src0_as != 0) { - ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); - } } (void) dst; @@ -7761,18 +7920,17 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s float * src1_ddf = nullptr; float * dst_ddf = nullptr; - // as = actual size - size_t src0_asf = 0; - size_t src1_asf = 0; - size_t dst_asf = 0; + cuda_pool_alloc<float> src0_f; + cuda_pool_alloc<float> src1_f; + cuda_pool_alloc<float> dst_f; ggml_cuda_set_device(g_main_device); - const cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; + cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; if (src0_on_device) { src0_ddf = (float *) src0_extra->data_device[g_main_device]; } else { - src0_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_asf); + src0_ddf = src0_f.alloc(ggml_nelements(src0)); CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf, src0, 0, 0, 0, nrows0, main_stream)); } @@ -7780,14 +7938,14 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s if (src1_on_device) { src1_ddf = (float *) src1_extra->data_device[g_main_device]; } else { - src1_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf); + src1_ddf = src1_f.alloc(ggml_nelements(src1)); CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf, src1, 0, 0, 0, nrows1, main_stream)); } } if (dst_on_device) { dst_ddf = (float *) dst_extra->data_device[g_main_device]; } else { - dst_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(dst), &dst_asf); + dst_ddf = dst_f.alloc(ggml_nelements(dst)); } // do the computation @@ -7799,16 +7957,6 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s CUDA_CHECK(cudaMemcpyAsync(dst->data, dst_ddf, ggml_nbytes(dst), cudaMemcpyDeviceToHost, main_stream)); } - if (src0_asf > 0) { - ggml_cuda_pool_free(src0_ddf, src0_asf); - } - if (src1_asf > 0) { - ggml_cuda_pool_free(src1_ddf, src1_asf); - } - if (dst_asf > 0) { - ggml_cuda_pool_free(dst_ddf, dst_asf); - } - if (dst->backend == GGML_BACKEND_CPU) { CUDA_CHECK(cudaDeviceSynchronize()); } @@ -8122,17 +8270,17 @@ static void ggml_cuda_op_mul_mat( CUDA_CHECK(ggml_cuda_set_device(id)); // free buffers again when done - if (src0_as[id] > 0) { - ggml_cuda_pool_free(src0_dd[id], src0_as[id]); - } - if (src1_asf[id] > 0) { - ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); + if (dst_as[id] > 0) { + ggml_cuda_pool_free(dst_dd[id], dst_as[id]); } if (src1_asq[id] > 0) { ggml_cuda_pool_free(src1_ddq[id], src1_asq[id]); } - if (dst_as[id] > 0) { - ggml_cuda_pool_free(dst_dd[id], dst_as[id]); + if (src1_asf[id] > 0) { + ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); + } + if (src0_as[id] > 0) { + ggml_cuda_pool_free(src0_dd[id], src0_as[id]); } } @@ -8385,14 +8533,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); - size_t src1_as = 0; - half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as); - to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); + cuda_pool_alloc<half> src1_as_f16(ne1); + to_fp16_cuda(src1_ddf, src1_as_f16.get(), ne1, main_stream); - size_t dst_as = 0; - - half * dst_f16 = nullptr; - char * dst_t = nullptr; + cuda_pool_alloc<half> dst_f16; + char * dst_t; cublasComputeType_t cu_compute_type = 
CUBLAS_COMPUTE_16F; cudaDataType_t cu_data_type = CUDA_R_16F; @@ -8411,8 +8556,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const void * beta = &beta_f16; if (dst->op_params[0] == GGML_PREC_DEFAULT) { - dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); - dst_t = (char *) dst_f16; + dst_t = (char *) dst_f16.alloc(ne); nbd2 /= sizeof(float) / sizeof(half); nbd3 /= sizeof(float) / sizeof(half); @@ -8459,9 +8603,9 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA - (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB - beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC + alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA + (const char *) src1_as_f16.get(), CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB + beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -8469,19 +8613,13 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const // use cublasGemmBatchedEx const int ne23 = ne12*ne13; - const void ** ptrs_src = nullptr; - void ** ptrs_dst = nullptr; - - size_t ptrs_src_s = 0; - size_t ptrs_dst_s = 0; - - ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s); - ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s); + cuda_pool_alloc<const void *> ptrs_src(2*ne23); + cuda_pool_alloc< void *> ptrs_dst(1*ne23); dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_as_f16, src1_as_f16, dst_t, - ptrs_src, ptrs_dst, + src0_as_f16, src1_as_f16.get(), dst_t, + ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, @@ -8493,30 +8631,19 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float), - beta, ( void **) (ptrs_dst + 0*ne23), cu_data_type, ne01, + alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/sizeof(half), + (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - - if (ptrs_src_s != 0) { - ggml_cuda_pool_free(ptrs_src, ptrs_src_s); - } - if (ptrs_dst_s != 0) { - ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s); - } } #endif if (dst->op_params[0] == GGML_PREC_DEFAULT) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); - - ggml_cuda_pool_free(dst_f16, dst_as); + to_fp32_cuda(dst_f16.get(), dst_ddf, ne, main_stream); } - - ggml_cuda_pool_free(src1_as_f16, src1_as); } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -8529,8 +8656,8 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 int64_t min_compute_capability = INT_MAX; for (int64_t id = 0; 
id < g_device_count; ++id) { - if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { - min_compute_capability = g_compute_capabilities[id]; + if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { + min_compute_capability = g_device_caps[id].cc; } } @@ -8843,12 +8970,11 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); } } else { - size_t as_src1, as_dst; - char * src1_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(src1), &as_src1); - char * dst_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(dst), &as_dst); + cuda_pool_alloc<char> src1_contiguous(sizeof(float)*ggml_nelements(src1)); + cuda_pool_alloc<char> dst_contiguous(sizeof(float)*ggml_nelements(dst)); - src1_row_extra.data_device[g_main_device] = src1_contiguous; - dst_row_extra.data_device[g_main_device] = dst_contiguous; + src1_row_extra.data_device[g_main_device] = src1_contiguous.get(); + dst_row_extra.data_device[g_main_device] = dst_contiguous.get(); const cudaMemcpyKind src1_kind = src1->backend == GGML_BACKEND_CPU ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; @@ -8868,7 +8994,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); - CUDA_CHECK(cudaMemcpyAsync(src1_contiguous + num_src1_rows*nb11, src1_original + i01*nb11, + CUDA_CHECK(cudaMemcpyAsync(src1_contiguous.get() + num_src1_rows*nb11, src1_original + i01*nb11, nb11, src1_kind, stream)); num_src1_rows++; } @@ -8900,14 +9026,11 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); - CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous + num_src1_rows*nb1, + CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous.get() + num_src1_rows*nb1, nb1, dst_kind, stream)); num_src1_rows++; } } - - ggml_cuda_pool_free(src1_contiguous, as_src1); - ggml_cuda_pool_free(dst_contiguous, as_dst); } if (dst->backend == GGML_BACKEND_CPU) { @@ -9678,8 +9801,10 @@ static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buff static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * ptr = ggml_cuda_host_malloc(size); + if (ptr == nullptr) { - return nullptr; + // fallback to cpu buffer + return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } // FIXME: this is a hack to avoid having to implement a new buffer type diff --git a/ggml.c b/ggml.c index 3656422d7..73600ab05 100644 --- a/ggml.c +++ b/ggml.c @@ -19351,7 +19351,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; } gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); - free(data); + free((void *)data); } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) { GGML_ASSERT(false && "nested arrays not supported"); } else { diff --git a/ggml.h b/ggml.h index 338f355a4..67d6bc4f1 100644 --- a/ggml.h +++ b/ggml.h @@ -255,6 +255,8 @@ #define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached") #elif defined(__GNUC__) #define GGML_UNREACHABLE() __builtin_unreachable() +#elif defined(_MSC_VER) +#define GGML_UNREACHABLE() __assume(0) #else #define 
GGML_UNREACHABLE() ((void) 0) #endif diff --git a/llama.cpp b/llama.cpp index 5699a0fcf..a24621539 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1281,7 +1281,7 @@ struct llama_hparams { if (this->rope_finetuned != other.rope_finetuned) return true; if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true; - const float EPSILON = 1e-9; + const float EPSILON = 1e-9f; if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true; if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true; @@ -10300,7 +10300,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch std::string result = model->vocab.id_to_token[token].text; llama_unescape_whitespace(result); if (length < (int) result.length()) { - return -result.length(); + return -(int) result.length(); } memcpy(buf, result.c_str(), result.length()); return result.length(); @@ -10330,7 +10330,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch std::string result = model->vocab.id_to_token[token].text; result = llama_decode_text(result); if (length < (int) result.length()) { - return -result.length(); + return -(int) result.length(); } memcpy(buf, result.c_str(), result.length()); return result.length(); diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp index 14914def5..8ff76c891 100644 --- a/tests/test-grad0.cpp +++ b/tests/test-grad0.cpp @@ -883,9 +883,6 @@ int main(int argc, const char ** argv) { srand(seed); const int nargs = 1; - int64_t ne2[4]; - ne2[0] = 1; - for (int ndims = 1; ndims <= 2; ++ndims) { x[0] = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f); From 753be377b69bda2d65a7e089f2b7f0c53ef3495e Mon Sep 17 00:00:00 2001 From: Shintarou Okada Date: Sun, 24 Dec 2023 22:35:49 +0900 Subject: [PATCH 53/84] llama : add PLaMo model (#3557) * add plamo mock * add tensor loading * plamo convert * update norm * able to compile * fix norm_rms_eps hparam * runnable * use inp_pos * seems ok * update kqv code * remove develop code * update README * shuffle attn_q.weight and attn_output.weight for broadcasting * remove plamo_llm_build_kqv and use llm_build_kqv * fix style * update * llama : remove obsolete KQ_scale * plamo : fix tensor names for correct GPU offload --------- Co-authored-by: Georgi Gerganov --- README.md | 1 + convert-hf-to-gguf.py | 86 +++++++++++++++- gguf-py/gguf/constants.py | 17 ++++ gguf-py/gguf/tensor_mapping.py | 37 ++++--- llama.cpp | 181 +++++++++++++++++++++++++++++++++ 5 files changed, 307 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 649c3b333..09338d226 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,7 @@ as the main playground for developing new features for the [ggml](https://github - [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek) - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) - [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral) +- [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) **Multimodal models:** diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index e71a96c48..303d08170 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -184,6 +184,8 @@ class Model: return MixtralModel if model_architecture == "PhiForCausalLM": return Phi2Model + if model_architecture == "PlamoForCausalLM": + return PlamoModel return Model def _is_model_safetensors(self) -> bool: @@ -225,6 +227,8 @@ class Model: return gguf.MODEL_ARCH.LLAMA if arch == "PhiForCausalLM": 
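# Sketch of how the two lookups above are used by the conversion driver
# (hypothetical call site; names other than PlamoModel / MODEL_ARCH.PLAMO are
# assumptions): the "architectures" entry of the HF config.json selects the
# Model subclass, and this method maps the same string to the GGUF arch enum.
#   arch = hparams["architectures"][0]                  # e.g. "PlamoForCausalLM"
#   model_class = Model.from_model_architecture(arch)   # -> PlamoModel below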
return gguf.MODEL_ARCH.PHI2 + if arch == "PlamoForCausalLM": + return gguf.MODEL_ARCH.PLAMO raise NotImplementedError(f'Architecture "{arch}" not supported!') @@ -1002,11 +1006,91 @@ class Phi2Model(Model): self.gguf_writer.add_add_bos_token(False) + +class PlamoModel(Model): + def set_vocab(self): + self._set_vocab_sentencepiece() + + def set_gguf_parameters(self): + hparams = self.hparams + block_count = hparams["num_hidden_layers"] + + self.gguf_writer.add_name("PLaMo") + self.gguf_writer.add_context_length(4096) # not in config.json + self.gguf_writer.add_embedding_length(hparams["hidden_size"]) + self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(hparams["num_attention_heads"]) + self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"] is wrong + self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"]) + + def shuffle_attn_q_weight(self, data_torch): + assert data_torch.size() == (5120, 5120) + data_torch = data_torch.reshape(8, 5, 128, 5120) + data_torch = torch.permute(data_torch, (1, 0, 2, 3)) + data_torch = torch.reshape(data_torch, (5120, 5120)) + return data_torch + + def shuffle_attn_output_weight(self, data_torch): + assert data_torch.size() == (5120, 5120) + data_torch = data_torch.reshape(5120, 8, 5, 128) + data_torch = torch.permute(data_torch, (0, 2, 1, 3)) + data_torch = torch.reshape(data_torch, (5120, 5120)) + return data_torch + + def write_tensors(self): + block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + for name, data_torch in self.get_tensors(): + if "self_attn.rotary_emb.inv_freq" in name: + continue + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Cannot map tensor {name!r}") + sys.exit() + + # shuffle for broadcasting of gqa in ggml_mul_mat + if new_name.endswith("attn_q.weight"): + data_torch = self.shuffle_attn_q_weight(data_torch) + elif new_name.endswith("attn_output.weight"): + data_torch = self.shuffle_attn_output_weight(data_torch) + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why can't we use these float16 as-is?
There should be no reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + ###### CONVERSION LOGIC ###### def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert a huggingface model to a GGML compatible file") + parser = argparse.ArgumentParser( + description="Convert a huggingface model to a GGML compatible file") parser.add_argument( "--vocab-only", action="store_true", help="extract only the vocab", diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 390dca049..4cd87cdda 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -96,6 +96,7 @@ class MODEL_ARCH(IntEnum): STABLELM = auto() QWEN = auto() PHI2 = auto() + PLAMO = auto() class MODEL_TENSOR(IntEnum): @@ -142,6 +143,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.STABLELM: "stablelm", MODEL_ARCH.QWEN: "qwen", MODEL_ARCH.PHI2: "phi2", + MODEL_ARCH.PLAMO: "plamo", } TENSOR_NAMES: dict[MODEL_TENSOR, str] = { @@ -349,6 +351,21 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.PLAMO: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], MODEL_ARCH.GPT2: [ # TODO ], diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 6fcbdbc1c..446c6b688 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -79,6 +79,7 @@ class TensorNameMap: "language_model.encoder.layers.{bid}.input_layernorm", # persimmon "model.layers.{bid}.ln1", # yi "transformer.h.{bid}.ln", # phi2 + "model.layers.layers.{bid}.norm", # plamo ), # Attention norm 2 @@ -99,26 +100,29 @@ class TensorNameMap: # Attention query MODEL_TENSOR.ATTN_Q: ( - "model.layers.{bid}.self_attn.q_proj", # llama-hf - "layers.{bid}.attention.wq", # llama-pth - "encoder.layer.{bid}.attention.self.query", # bert - "transformer.h.{bid}.attn.q_proj", # gpt-j + "model.layers.{bid}.self_attn.q_proj", # llama-hf + "layers.{bid}.attention.wq", # llama-pth + "encoder.layer.{bid}.attention.self.query", # bert + "transformer.h.{bid}.attn.q_proj", # gpt-j + "model.layers.layers.{bid}.self_attn.q_proj", # plamo ), # Attention key MODEL_TENSOR.ATTN_K: ( - "model.layers.{bid}.self_attn.k_proj", # llama-hf - "layers.{bid}.attention.wk", # llama-pth - "encoder.layer.{bid}.attention.self.key", # bert - "transformer.h.{bid}.attn.k_proj", # gpt-j + "model.layers.{bid}.self_attn.k_proj", # llama-hf + "layers.{bid}.attention.wk", # llama-pth + "encoder.layer.{bid}.attention.self.key", # bert + "transformer.h.{bid}.attn.k_proj", # gpt-j + "model.layers.layers.{bid}.self_attn.k_proj", # plamo ), # Attention value MODEL_TENSOR.ATTN_V: ( -
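# Sketch of the PLaMo weight shuffle defined in PlamoModel above (the dims are
# the 13B values baked into its asserts: n_embd = 5120, 40 query heads of size
# 128, 5 kv heads, so 8 query heads share each kv head):
#   attn_q.weight: (5120, 5120) -> reshape (8, 5, 128, 5120)
#                  -> permute (1, 0, 2, 3) -> (5, 8, 128, 5120) -> (5120, 5120)
# i.e. the rows are regrouped so the 8 query heads of each kv group become
# contiguous, which lets ggml_mul_mat broadcast the shared K/V head across
# them; attn_output.weight gets the matching inverse shuffle on its input
# columns so the attention output is unchanged.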
"model.layers.{bid}.self_attn.v_proj", # llama-hf + "layers.{bid}.attention.wv", # llama-pth + "encoder.layer.{bid}.attention.self.value", # bert + "transformer.h.{bid}.attn.v_proj", # gpt-j + "model.layers.layers.{bid}.self_attn.v_proj", # plamo ), # Attention output @@ -134,12 +138,14 @@ class TensorNameMap: "transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon "transformer.h.{bid}.mixer.out_proj", # phi2 + "model.layers.layers.{bid}.self_attn.o_proj", # plamo ), # Rotary embeddings MODEL_TENSOR.ATTN_ROT_EMBD: ( - "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf - "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf + "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo ), # Feed-forward norm @@ -174,6 +180,7 @@ class TensorNameMap: "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon "transformer.h.{bid}.mlp.w1", # qwen "transformer.h.{bid}.mlp.fc1", # phi2 + "model.layers.layers.{bid}.mlp.up_proj", # plamo ), MODEL_TENSOR.FFN_UP_EXP: ( @@ -186,6 +193,7 @@ class TensorNameMap: "model.layers.{bid}.mlp.gate_proj", # llama-hf refact "layers.{bid}.feed_forward.w1", # llama-pth "transformer.h.{bid}.mlp.w2", # qwen + "model.layers.layers.{bid}.mlp.gate_proj", # plamo ), MODEL_TENSOR.FFN_GATE_EXP: ( @@ -206,6 +214,7 @@ class TensorNameMap: "transformer.h.{bid}.mlp.fc_out", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon "transformer.h.{bid}.mlp.fc2", # phi2 + "model.layers.layers.{bid}.mlp.down_proj", # plamo ), MODEL_TENSOR.FFN_DOWN_EXP: ( diff --git a/llama.cpp b/llama.cpp index a24621539..0b99f1e03 100644 --- a/llama.cpp +++ b/llama.cpp @@ -198,6 +198,7 @@ enum llm_arch { LLM_ARCH_STABLELM, LLM_ARCH_QWEN, LLM_ARCH_PHI2, + LLM_ARCH_PLAMO, LLM_ARCH_UNKNOWN, }; @@ -216,6 +217,7 @@ static std::map LLM_ARCH_NAMES = { { LLM_ARCH_STABLELM, "stablelm" }, { LLM_ARCH_QWEN, "qwen" }, { LLM_ARCH_PHI2, "phi2" }, + { LLM_ARCH_PLAMO, "plamo" }, }; enum llm_kv { @@ -567,6 +569,24 @@ static std::map> LLM_TENSOR_NAMES = { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PLAMO, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_UNKNOWN, @@ -2749,6 +2769,15 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_PLAMO: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; default: (void)0; } @@ -3630,6 +3659,51 @@ static bool llm_load_tensors( layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); } } break; + case LLM_ARCH_PLAMO: + { + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 
GGML_BACKEND_CPU); + + // output + { + ggml_backend_type backend_norm; + ggml_backend_type backend_output; + + if (n_gpu_layers > int(n_layer)) { + backend_norm = llama_backend_offload; + backend_output = llama_backend_offload_split; + } else { + backend_norm = GGML_BACKEND_CPU; + backend_output = GGML_BACKEND_CPU; + } + + model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); + model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + } + + const uint32_t n_ff = hparams.n_ff; + + const int i_gpu_start = n_layer - n_gpu_layers; + + model.layers.resize(n_layer); + + for (uint32_t i = 0; i < n_layer; ++i) { + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + + auto & layer = model.layers[i]; + + layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + + layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); + layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + + layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -5555,6 +5629,109 @@ struct llm_build_context { return gf; } + + struct ggml_cgraph * build_plamo() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); + + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } + + for (int il = 0; il < n_layer; ++il) { + + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); + + struct ggml_tensor * attention_norm = cur; + + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, + n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, 
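// Structural note on build_plamo() (a reading of the graph built here): unlike
// the sequential llama block, PLaMo runs attention and the FFN in parallel on
// the same normalized input and sums both branches with the residual,
//   h_out = x + Attn(RMSNorm(x)) + FFN(RMSNorm(x))
// which is why `attention_norm` is saved before self-attention and restored
// into `cur` for the feed-forward path below.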
attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, + n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); + + cur = llm_build_kqv(ctx0, model, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il); + cb(cur, "kqv_out", il); + } + struct ggml_tensor * sa_out = cur; + + cur = attention_norm; + + // feed-forward network + { + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, sa_out); + cb(cur, "l_out", il); + + cur = ggml_add(ctx0, cur, inpL); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } }; // @@ -6065,6 +6242,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_phi2(); } break; + case LLM_ARCH_PLAMO: + { + result = llm.build_plamo(); + } break; default: GGML_ASSERT(false); } From b9f47952ffae4e0d3420905526003c23333f6c98 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 21:01:12 +0100 Subject: [PATCH 54/84] simplify bug issue template (#4623) --- .github/ISSUE_TEMPLATE/bug.md | 177 +--------------------------------- 1 file changed, 1 insertion(+), 176 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index c003fe7c1..ce69e6395 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -6,179 +6,4 @@ assignees: '' --- -# Prerequisites - -Please answer the following questions for yourself before submitting an issue. - -- [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now. -- [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md). -- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed). -- [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new bug or useful enhancement to share. - -# Expected Behavior - -Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do. - -# Current Behavior - -Please provide a detailed written description of what `llama.cpp` did, instead. - -# Environment and Context - -Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions. - -* Physical (or virtual) hardware you are using, e.g. for Linux: - -`$ lscpu` - -* Operating System, e.g. for Linux: - -`$ uname -a` - -* SDK version, e.g. 
for Linux: - -``` -$ python3 --version -$ make --version -$ g++ --version -``` - -# Failure Information (for bugs) - -Please help provide information about the failure / bug. - -# Steps to Reproduce - -Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better. - -1. step 1 -2. step 2 -3. step 3 -4. etc. - -# Failure Logs - -Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes. - -Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [Github's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability. - -Example environment info: -``` -llama.cpp$ git log | head -1 -commit 2af23d30434a677c6416812eea52ccc0af65119c - -llama.cpp$ lscpu | egrep "AMD|Flags" -Vendor ID: AuthenticAMD -Model name: AMD Ryzen Threadripper 1950X 16-Core Processor -Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 xsaves clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sme sev -Virtualization: AMD-V - -llama.cpp$ python3 --version -Python 3.10.9 - -llama.cpp$ pip list | egrep "torch|numpy|sentencepiece" -numpy 1.24.2 -numpydoc 1.5.0 -sentencepiece 0.1.97 -torch 1.13.1 -torchvision 0.14.1 - -llama.cpp$ make --version | head -1 -GNU Make 4.3 - -$ md5sum ./models/65B/ggml-model-q4_0.bin -dbdd682cce80e2d6e93cefc7449df487 ./models/65B/ggml-model-q4_0.bin -``` - -Example run with the Linux command [perf](https://www.brendangregg.com/perf.html) -``` -llama.cpp$ perf stat ./main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p "Please close your issue when it has been answered." -main: seed = 1679149377 -llama_model_load: loading model from './models/65B/ggml-model-q4_0.bin' - please wait ... -llama_model_load: n_vocab = 32000 -llama_model_load: n_ctx = 512 -llama_model_load: n_embd = 8192 -llama_model_load: n_mult = 256 -llama_model_load: n_head = 64 -llama_model_load: n_layer = 80 -llama_model_load: n_rot = 128 -llama_model_load: f16 = 2 -llama_model_load: n_ff = 22016 -llama_model_load: n_parts = 8 -llama_model_load: ggml ctx size = 41477.73 MB -llama_model_load: memory_size = 2560.00 MB, n_mem = 40960 -llama_model_load: loading model part 1/8 from './models/65B/ggml-model-q4_0.bin' -llama_model_load: .......................................................................................... 
done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 2/8 from './models/65B/ggml-model-q4_0.bin.1' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 3/8 from './models/65B/ggml-model-q4_0.bin.2' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 4/8 from './models/65B/ggml-model-q4_0.bin.3' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 5/8 from './models/65B/ggml-model-q4_0.bin.4' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 6/8 from './models/65B/ggml-model-q4_0.bin.5' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 7/8 from './models/65B/ggml-model-q4_0.bin.6' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.7' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 - -system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 | - -main: prompt: 'Please close your issue when it has been answered.' -main: number of tokens in prompt = 11 - 1 -> '' - 12148 -> 'Please' - 3802 -> ' close' - 596 -> ' your' - 2228 -> ' issue' - 746 -> ' when' - 372 -> ' it' - 756 -> ' has' - 1063 -> ' been' - 7699 -> ' answered' - 29889 -> '.' - -sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000, repeat_last_n = 64, repeat_penalty = 1.300000 - - -Please close your issue when it has been answered. -@duncan-donut: I'm trying to figure out what kind of "support" you need for this script and why, exactly? Is there a question about how the code works that hasn't already been addressed in one or more comments below this ticket, or are we talking something else entirely like some sorta bugfixing job because your server setup is different from mine?? -I can understand if your site needs to be running smoothly and you need help with a fix of sorts but there should really be nothing wrong here that the code itself could not handle. And given that I'm getting reports about how it works perfectly well on some other servers, what exactly are we talking? A detailed report will do wonders in helping us get this resolved for ya quickly so please take your time and describe the issue(s) you see as clearly & concisely as possible!! -@duncan-donut: I'm not sure if you have access to cPanel but you could try these instructions. It is worth a shot! 
Let me know how it goes (or what error message, exactly!) when/if ya give that code a go? [end of text] - - -main: mem per token = 71159620 bytes -main: load time = 19309.95 ms -main: sample time = 168.62 ms -main: predict time = 223895.61 ms / 888.47 ms per token -main: total time = 246406.42 ms - - Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.': - - 3636882.89 msec task-clock # 14.677 CPUs utilized - 13509 context-switches # 3.714 /sec - 2436 cpu-migrations # 0.670 /sec - 10476679 page-faults # 2.881 K/sec - 13133115082869 cycles # 3.611 GHz (16.77%) - 29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%) - 10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%) - 23479217109614 instructions # 1.79 insn per cycle - # 0.44 stalled cycles per insn (16.76%) - 2353072268027 branches # 647.002 M/sec (16.77%) - 1998682780 branch-misses # 0.08% of all branches (16.76%) - - 247.802177522 seconds time elapsed - - 3618.573072000 seconds user - 18.491698000 seconds sys -``` +Please include information about your system, the steps to reproduce the bug, and the version of llama.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug. From a206137f927daef1752753cf5e281220b449a468 Mon Sep 17 00:00:00 2001 From: Paul Tsochantaris Date: Mon, 25 Dec 2023 16:09:53 +0000 Subject: [PATCH 55/84] Adding Emeltal reference to UI list (#4629) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 09338d226..3b202a336 100644 --- a/README.md +++ b/README.md @@ -133,6 +133,7 @@ as the main playground for developing new features for the [ggml](https://github - [withcatai/catai](https://github.com/withcatai/catai) - [semperai/amica](https://github.com/semperai/amica) - [psugihara/FreeChat](https://github.com/psugihara/FreeChat) +- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) --- From 77465dad48d7c945c367ab46b6f2ea98ae9b7b15 Mon Sep 17 00:00:00 2001 From: FantasyGmm <16450052+FantasyGmm@users.noreply.github.com> Date: Tue, 26 Dec 2023 18:38:36 +0800 Subject: [PATCH 56/84] Fix new CUDA10 compilation errors (#4635) --- ggml-cuda.cu | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index ac3b3c14d..f32e83ab6 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -102,6 +102,7 @@ #include #if CUDART_VERSION < 11020 +#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED #define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH #define CUBLAS_COMPUTE_16F CUDA_R_16F #define CUBLAS_COMPUTE_32F CUDA_R_32F From de8e496437c59e7d1cc84109e3e49a3478aee25a Mon Sep 17 00:00:00 2001 From: WillCorticesAI <150854901+WillCorticesAI@users.noreply.github.com> Date: Tue, 26 Dec 2023 05:42:08 -0500 Subject: [PATCH 57/84] Update comment for AdamW implementation reference. 
(#4604) Co-authored-by: Will Findley --- ggml.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index 73600ab05..d24560480 100644 --- a/ggml.c +++ b/ggml.c @@ -17456,9 +17456,9 @@ static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g } // -// ADAM +// Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf // -// ref: https://arxiv.org/pdf/1412.6980.pdf +// (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf) // static enum ggml_opt_result ggml_opt_adam( From dc68f0054cd279cddddb0cae0c9ef4f9cbaa512a Mon Sep 17 00:00:00 2001 From: slaren Date: Tue, 26 Dec 2023 21:23:59 +0100 Subject: [PATCH 58/84] cuda : fix vmm pool with multi GPU (#4620) * cuda : fix vmm pool with multi GPU * hip * use recommended granularity instead of minimum * better error checking * fix mixtral * use cudaMemcpy3DPeerAsync * use cuda_pool_alloc in ggml_cuda_op_mul_mat * consolidate error checking in ggml_cuda_set_device * remove unnecessary inlines ggml-ci * style fixes * only use vmm for the main device * fix scratch buffer size, re-enable vmm pool for all devices * remove unnecessary check id != g_main_device --- ggml-cuda.cu | 483 +++++++++++++++++++++++++-------------------------- ggml.c | 3 - llama.cpp | 3 +- 3 files changed, 243 insertions(+), 246 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f32e83ab6..abad9cc39 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -68,8 +68,9 @@ #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) #endif #define cudaMemcpy hipMemcpy -#define cudaMemcpy2DAsync hipMemcpy2DAsync #define cudaMemcpyAsync hipMemcpyAsync +#define cudaMemcpyPeerAsync hipMemcpyPeerAsync +#define cudaMemcpy2DAsync hipMemcpy2DAsync #define cudaMemcpyDeviceToDevice hipMemcpyDeviceToDevice #define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost #define cudaMemcpyHostToDevice hipMemcpyHostToDevice @@ -163,7 +164,7 @@ static __device__ __forceinline__ int __vsubss4(const int a, const int b) { const int8x4_t vb = reinterpret_cast(b); #if __has_builtin(__builtin_elementwise_sub_sat) const int8x4_t c = __builtin_elementwise_sub_sat(va, vb); - return reinterpret_cast(c); + return reinterpret_cast(c); #else int8x4_t c; int16_t tmp; @@ -174,7 +175,7 @@ static __device__ __forceinline__ int __vsubss4(const int a, const int b) { if(tmp < std::numeric_limits::min()) tmp = std::numeric_limits::min(); c[i] = tmp; } - return reinterpret_cast(c); + return reinterpret_cast(c); #endif // __has_builtin(__builtin_elementwise_sub_sat) } @@ -212,6 +213,28 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); +[[noreturn]] +static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { + int id = -1; // in case cudaGetDevice fails + cudaGetDevice(&id); + + fprintf(stderr, "CUDA error: %s\n", msg); + fprintf(stderr, " current device: %d, in function %s at %s:%d\n", id, func, file, line); + fprintf(stderr, " %s\n", stmt); + // abort with GGML_ASSERT to get a stack trace + GGML_ASSERT(!"CUDA error"); +} + +#define CUDA_CHECK_GEN(err, success, error_fn) \ + do { \ + auto err_ = (err); \ + if (err_ != (success)) { \ + ggml_cuda_error(#err, __func__, __FILE__, __LINE__, error_fn(err_)); \ + } \ + } while (0) + +#define CUDA_CHECK(err) CUDA_CHECK_GEN(err, cudaSuccess, cudaGetErrorString) + #if CUDART_VERSION >= 12000 static const char * cublas_get_error_str(const 
cublasStatus_t err) { return cublasGetStatusString(err); @@ -233,15 +256,7 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); } #endif // CUDART_VERSION >= 12000 -[[noreturn]] -static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { - fprintf(stderr, "CUDA error: %s: %s\n", stmt, msg); - fprintf(stderr, " in function %s at %s:%d\n", func, file, line); - GGML_ASSERT(!"CUDA error"); -} - -#define CUDA_CHECK(err) do { auto err_ = (err); if (err_ != cudaSuccess) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cudaGetErrorString(err_)); } while (0) -#define CUBLAS_CHECK(err) do { auto err_ = (err); if (err_ != CUBLAS_STATUS_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cublas_get_error_str(err_)); } while (0) +#define CUBLAS_CHECK(err) CUDA_CHECK_GEN(err, CUBLAS_STATUS_SUCCESS, cublas_get_error_str) #if !defined(GGML_USE_HIPBLAS) static const char * cu_get_error_str(CUresult err) { @@ -249,7 +264,7 @@ static const char * cu_get_error_str(CUresult err) { cuGetErrorString(err, &err_str); return err_str; } -#define CU_CHECK(err) do { auto err_ = (err); if (err_ != CUDA_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cu_get_error_str(err_)); } while (0) +#define CU_CHECK(err) CUDA_CHECK_GEN(err, CUDA_SUCCESS, cu_get_error_str) #endif #if CUDART_VERSION >= 11100 @@ -306,10 +321,10 @@ typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * s typedef void (*ggml_cuda_op_mul_mat_t)( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, const cudaStream_t & stream); + const int64_t src1_padded_row_size, cudaStream_t stream); typedef void (*ggml_cuda_op_flatten_t)( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream); + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream); // QK = number of values after dequantization // QR = QK / number of values before dequantization @@ -515,15 +530,15 @@ struct ggml_tensor_extra_gpu { // this is faster on Windows // probably because the Windows CUDA libraries forget to make this check before invoking the drivers -inline cudaError_t ggml_cuda_set_device(const int device) { +static void ggml_cuda_set_device(const int device) { int current_device; CUDA_CHECK(cudaGetDevice(¤t_device)); if (device == current_device) { - return cudaSuccess; + return; } - return cudaSetDevice(device); + CUDA_CHECK(cudaSetDevice(device)); } static int g_device_count = -1; @@ -538,7 +553,6 @@ struct cuda_device_capabilities { static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; - static void * g_scratch_buffer = nullptr; static size_t g_scratch_size = 0; // disabled by default static size_t g_scratch_offset = 0; @@ -580,6 +594,7 @@ static __device__ __forceinline__ float warp_reduce_max(float x) { static __device__ __forceinline__ float op_repeat(const float a, const float b) { return b; + GGML_UNUSED(a); } static __device__ __forceinline__ float op_add(const float a, const float b) { @@ -701,7 +716,7 @@ static __global__ void silu_f32(const float * x, float * dst, const int k) { dst[i] = x[i] / (1.0f + expf(-x[i])); } -static __global__ void 
gelu_quick_f32(const float *x, float *dst, int k) { +static __global__ void gelu_quick_f32(const float * x, float * dst, int k) { const float GELU_QUICK_COEF = -1.702f; const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { @@ -710,7 +725,7 @@ static __global__ void gelu_quick_f32(const float *x, float *dst, int k) { dst[i] = x[i] * (1.0f / (1.0f + expf(GELU_QUICK_COEF * x[i]))); } -static __global__ void tanh_f32(const float *x, float *dst, int k) { +static __global__ void tanh_f32(const float * x, float * dst, int k) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; @@ -727,7 +742,7 @@ static __global__ void relu_f32(const float * x, float * dst, const int k) { dst[i] = fmaxf(x[i], 0); } -static __global__ void leaky_relu_f32(const float *x, float *dst, const int k, const float negative_slope) { +static __global__ void leaky_relu_f32(const float * x, float * dst, const int k, const float negative_slope) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; @@ -780,7 +795,7 @@ static __global__ void norm_f32(const float * x, float * dst, const int ncols, c } } -static __global__ void concat_f32(const float *x,const float *y, float *dst, const int ne0, const int ne02) { +static __global__ void concat_f32(const float * x,const float * y, float * dst, const int ne0, const int ne02) { int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; @@ -805,7 +820,7 @@ static __global__ void concat_f32(const float *x,const float *y, float *dst, c } } -static __global__ void upscale_f32(const float *x, float *dst, const int ne00, const int nb02, const int scale_factor) { +static __global__ void upscale_f32(const float * x, float * dst, const int ne00, const int nb02, const int scale_factor) { int ne0 = ne00 * scale_factor; int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { @@ -825,7 +840,7 @@ static __global__ void upscale_f32(const float *x, float *dst, const int ne00, dst[offset_dst] = x[offset_src]; } -static __global__ void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02) { +static __global__ void pad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02) { int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; @@ -4727,7 +4742,6 @@ static __global__ void mul_mat_p021_f16_f32( const int row_y = col_x; - // y is not transposed but permuted const int iy = channel*nrows_y + row_y; @@ -5402,7 +5416,7 @@ struct bin_bcast_cuda { cne[3] = 1; }; - auto collapse_nb = [](size_t cnb[], int64_t cne[]) { + auto collapse_nb = [](size_t cnb[], const int64_t cne[]) { cnb[1] *= cne[1]; cnb[2] *= cne[2]; cnb[3] *= cne[3]; @@ -6566,18 +6580,16 @@ struct scoped_spin_lock { static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; // #define DEBUG_CUDA_MALLOC -struct cuda_buffer { +struct ggml_cuda_buffer { void * ptr = nullptr; size_t size = 0; }; -static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; +static ggml_cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; -static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { +static void * ggml_cuda_pool_malloc_leg(int device, size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); - int id; - CUDA_CHECK(cudaGetDevice(&id)); #ifdef DEBUG_CUDA_MALLOC int nnz = 0; size_t max_size = 0; @@ -6585,7 +6597,7 @@ static void 
* ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { size_t best_diff = 1ull << 36; int ibest = -1; for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) { - cuda_buffer& b = g_cuda_buffer_pool[id][i]; + ggml_cuda_buffer& b = g_cuda_buffer_pool[device][i]; if (b.ptr != nullptr) { #ifdef DEBUG_CUDA_MALLOC ++nnz; @@ -6608,7 +6620,7 @@ static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { } } if (ibest >= 0) { - cuda_buffer& b = g_cuda_buffer_pool[id][ibest]; + ggml_cuda_buffer& b = g_cuda_buffer_pool[device][ibest]; void * ptr = b.ptr; *actual_size = b.size; b.ptr = nullptr; @@ -6618,9 +6630,10 @@ static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { void * ptr; size_t look_ahead_size = (size_t) (1.05 * size); look_ahead_size = 256 * ((look_ahead_size + 255)/256); + ggml_cuda_set_device(device); CUDA_CHECK(cudaMalloc((void **) &ptr, look_ahead_size)); *actual_size = look_ahead_size; - g_cuda_pool_size[id] += look_ahead_size; + g_cuda_pool_size[device] += look_ahead_size; #ifdef DEBUG_CUDA_MALLOC fprintf(stderr, "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, id, nnz, (uint32_t)(max_size/1024/1024), (uint32_t)(g_cuda_pool_size[id]/1024/1024), (uint32_t)(size/1024/1024)); @@ -6628,13 +6641,11 @@ static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { return ptr; } -static void ggml_cuda_pool_free_leg(void * ptr, size_t size) { +static void ggml_cuda_pool_free_leg(int device, void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); - int id; - CUDA_CHECK(cudaGetDevice(&id)); for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) { - cuda_buffer& b = g_cuda_buffer_pool[id][i]; + ggml_cuda_buffer& b = g_cuda_buffer_pool[device][i]; if (b.ptr == nullptr) { b.ptr = ptr; b.size = size; @@ -6642,73 +6653,73 @@ static void ggml_cuda_pool_free_leg(void * ptr, size_t size) { } } fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n"); + ggml_cuda_set_device(device); CUDA_CHECK(cudaFree(ptr)); - g_cuda_pool_size[id] -= size; + g_cuda_pool_size[device] -= size; } #if !defined(GGML_USE_HIPBLAS) // pool with virtual memory -static std::vector g_cuda_pool_handles[GGML_CUDA_MAX_DEVICES]; static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0}; static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0}; static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 36; // 64 GB -static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { +static void * ggml_cuda_pool_malloc_vmm(int device, size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); - int id; - CUDA_CHECK(cudaGetDevice(&id)); // round up the allocation size to the alignment to ensure that all allocations are aligned for all data types const size_t alignment = 128; size = alignment * ((size + alignment - 1) / alignment); - size_t avail = g_cuda_pool_size[id] - g_cuda_pool_used[id]; + size_t avail = g_cuda_pool_size[device] - g_cuda_pool_used[device]; if (size > avail) { // round up to the next multiple of the granularity size_t reserve_size = size - avail; - const size_t granularity = g_device_caps[id].vmm_granularity; + const size_t granularity = g_device_caps[device].vmm_granularity; reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); - GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); + GGML_ASSERT(g_cuda_pool_size[device] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); // allocate more physical memory CUmemAllocationProp prop = {}; 
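// The four driver-API steps below grow the pool: cuMemCreate allocates new
// physical memory, cuMemAddressReserve (done only once) reserves a contiguous
// virtual range of CUDA_POOL_VMM_MAX_SIZE, cuMemMap attaches the new physical
// block at the end of the already-mapped region, and cuMemSetAccess makes it
// readable and writable from this device. Because the virtual base address
// never moves, pointers handed out before the pool grew remain valid.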
prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; - prop.location.id = id; + prop.location.id = device; CUmemGenericAllocationHandle handle; CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); // reserve virtual address space (if not already reserved) - if (g_cuda_pool_addr[id] == 0) { - CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[id], CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); + if (g_cuda_pool_addr[device] == 0) { + CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[device], CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); } // map at the end of the pool - CU_CHECK(cuMemMap(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, 0, handle, 0)); + CU_CHECK(cuMemMap(g_cuda_pool_addr[device] + g_cuda_pool_size[device], reserve_size, 0, handle, 0)); + + // the memory allocation handle is no longer needed after mapping + CU_CHECK(cuMemRelease(handle)); // set access CUmemAccessDesc access = {}; access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; - access.location.id = id; + access.location.id = device; access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; - CU_CHECK(cuMemSetAccess(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, &access, 1)); + CU_CHECK(cuMemSetAccess(g_cuda_pool_addr[device] + g_cuda_pool_size[device], reserve_size, &access, 1)); // add to the pool - g_cuda_pool_handles[id].push_back(handle); - g_cuda_pool_size[id] += reserve_size; + g_cuda_pool_size[device] += reserve_size; //printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB)\n", // id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), // (unsigned long long) (reserve_size/1024/1024)); } - GGML_ASSERT(g_cuda_pool_addr[id] != 0); + GGML_ASSERT(g_cuda_pool_addr[device] != 0); - void * ptr = (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id]); + void * ptr = (void *) (g_cuda_pool_addr[device] + g_cuda_pool_used[device]); *actual_size = size; - g_cuda_pool_used[id] += size; + g_cuda_pool_used[device] += size; #ifdef DEBUG_CUDA_MALLOC printf("cuda pool[%d]: allocated %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr); @@ -6717,38 +6728,32 @@ static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { return ptr; } -static void ggml_cuda_pool_free_vmm(void * ptr, size_t size) { +static void ggml_cuda_pool_free_vmm(int device, void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); - int id; - CUDA_CHECK(cudaGetDevice(&id)); #ifdef DEBUG_CUDA_MALLOC printf("cuda pool[%d]: freed %llu bytes at %llx\n", id, (unsigned long long) size, ptr); #endif - g_cuda_pool_used[id] -= size; + g_cuda_pool_used[device] -= size; // all deallocations must be in reverse order of the allocations - GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id])); + GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[device] + g_cuda_pool_used[device])); } -static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); - if (g_device_caps[id].vmm) { - return ggml_cuda_pool_malloc_vmm(size, actual_size); +static void * ggml_cuda_pool_malloc(int device, size_t size, size_t * actual_size) { + if (g_device_caps[device].vmm) { + return ggml_cuda_pool_malloc_vmm(device, size, actual_size); } else { - return ggml_cuda_pool_malloc_leg(size, actual_size); + return ggml_cuda_pool_malloc_leg(device, size, actual_size); } } -static void ggml_cuda_pool_free(void * ptr, size_t size) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); - if (g_device_caps[id].vmm) { - ggml_cuda_pool_free_vmm(ptr, size); +static void 
ggml_cuda_pool_free(int device, void * ptr, size_t size) { + if (g_device_caps[device].vmm) { + ggml_cuda_pool_free_vmm(device, ptr, size); } else { - ggml_cuda_pool_free_leg(ptr, size); + ggml_cuda_pool_free_leg(device, ptr, size); } } #else @@ -6758,13 +6763,15 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { template struct cuda_pool_alloc { + int device = -1; T * ptr = nullptr; size_t actual_size = 0; // size is in number of elements T * alloc(size_t size) { GGML_ASSERT(ptr == nullptr); - ptr = (T *) ggml_cuda_pool_malloc(size * sizeof(T), &this->actual_size); + CUDA_CHECK(cudaGetDevice(&device)); + ptr = (T *) ggml_cuda_pool_malloc(device, size * sizeof(T), &this->actual_size); return ptr; } @@ -6774,7 +6781,7 @@ struct cuda_pool_alloc { ~cuda_pool_alloc() { if (ptr != nullptr) { - ggml_cuda_pool_free(ptr, actual_size); + ggml_cuda_pool_free(device, ptr, actual_size); } } @@ -6839,7 +6846,7 @@ void ggml_init_cublas() { alloc_prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; alloc_prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; alloc_prop.location.id = id; - CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); + CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); } #endif // !defined(GGML_USE_HIPBLAS) g_device_caps[id].vmm = !!device_vmm; @@ -6861,7 +6868,7 @@ void ggml_init_cublas() { } for (int id = 0; id < g_device_count; ++id) { - CUDA_CHECK(ggml_cuda_set_device(id)); + ggml_cuda_set_device(id); // create cuda streams for (int is = 0; is < MAX_STREAMS; ++is) { @@ -6976,7 +6983,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d( static void ggml_cuda_op_get_rows( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_d, const float * src1_d, float * dst_d, const cudaStream_t & stream) { + const float * src0_d, const float * src1_d, float * dst_d, cudaStream_t stream) { GGML_ASSERT(src1->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -7018,9 +7025,9 @@ static void ggml_cuda_op_get_rows( } template -inline void ggml_cuda_op_bin_bcast( +static void ggml_cuda_op_bin_bcast( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src1->type == GGML_TYPE_F32); @@ -7039,7 +7046,7 @@ inline void ggml_cuda_op_bin_bcast( static void ggml_cuda_op_repeat( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_d, const float * src1_d, float * dst_d, const cudaStream_t & main_stream) { + const float * src0_d, const float * src1_d, float * dst_d, cudaStream_t main_stream) { ggml_cuda_op_bin_bcast>(dst, src0, dst, nullptr, src0_d, dst_d, main_stream); @@ -7047,16 +7054,16 @@ static void ggml_cuda_op_repeat( (void) src1_d; } -inline void ggml_cuda_op_add( +static void ggml_cuda_op_add( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { ggml_cuda_op_bin_bcast>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); } -inline void ggml_cuda_op_acc( +static void ggml_cuda_op_acc( const ggml_tensor * src0, const ggml_tensor * src1, 
ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); @@ -7073,23 +7080,23 @@ inline void ggml_cuda_op_acc( (void) dst; } -inline void ggml_cuda_op_mul( +static void ggml_cuda_op_mul( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { ggml_cuda_op_bin_bcast>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); } -inline void ggml_cuda_op_div( +static void ggml_cuda_op_div( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { ggml_cuda_op_bin_bcast>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); } -inline void ggml_cuda_op_gelu( +static void ggml_cuda_op_gelu( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7101,9 +7108,9 @@ inline void ggml_cuda_op_gelu( (void) src1_dd; } -inline void ggml_cuda_op_silu( +static void ggml_cuda_op_silu( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7115,9 +7122,9 @@ inline void ggml_cuda_op_silu( (void) src1_dd; } -inline void ggml_cuda_op_gelu_quick( +static void ggml_cuda_op_gelu_quick( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7129,9 +7136,9 @@ inline void ggml_cuda_op_gelu_quick( (void) src1_dd; } -inline void ggml_cuda_op_tanh( +static void ggml_cuda_op_tanh( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7143,9 +7150,9 @@ inline void ggml_cuda_op_tanh( (void) src1_dd; } -inline void ggml_cuda_op_relu( +static void ggml_cuda_op_relu( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7157,9 +7164,9 
@@ inline void ggml_cuda_op_relu( (void) src1_dd; } -inline void ggml_cuda_op_leaky_relu( +static void ggml_cuda_op_leaky_relu( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7174,9 +7181,9 @@ inline void ggml_cuda_op_leaky_relu( (void) src1_dd; } -inline void ggml_cuda_op_sqr( +static void ggml_cuda_op_sqr( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7188,9 +7195,9 @@ inline void ggml_cuda_op_sqr( (void) src1_dd; } -inline void ggml_cuda_op_norm( +static void ggml_cuda_op_norm( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7208,10 +7215,9 @@ inline void ggml_cuda_op_norm( (void) src1_dd; } - -inline void ggml_cuda_op_group_norm( +static void ggml_cuda_op_group_norm( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7225,9 +7231,9 @@ inline void ggml_cuda_op_group_norm( (void) src1_dd; } -inline void ggml_cuda_op_concat( +static void ggml_cuda_op_concat( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); @@ -7241,9 +7247,9 @@ inline void ggml_cuda_op_concat( (void) dst; } -inline void ggml_cuda_op_upscale( +static void ggml_cuda_op_upscale( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -7258,9 +7264,9 @@ inline void ggml_cuda_op_upscale( (void) src1_dd; } -inline void ggml_cuda_op_pad( +static void ggml_cuda_op_pad( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -7275,9 +7281,9 @@ inline void ggml_cuda_op_pad( (void) src1_dd; } -inline void ggml_cuda_op_rms_norm( +static void ggml_cuda_op_rms_norm( const 
ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7295,10 +7301,10 @@ inline void ggml_cuda_op_rms_norm( (void) src1_dd; } -inline void ggml_cuda_op_mul_mat_q( +static void ggml_cuda_op_mul_mat_q( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, const cudaStream_t & stream) { + const int64_t src1_padded_row_size, cudaStream_t stream) { const int64_t ne00 = src0->ne[0]; @@ -7360,7 +7366,7 @@ inline void ggml_cuda_op_mul_mat_q( static int64_t get_row_rounding(ggml_type type) { int64_t min_compute_capability = INT_MAX; int64_t max_compute_capability = INT_MIN; - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { if (min_compute_capability > g_device_caps[id].cc) { min_compute_capability = g_device_caps[id].cc; @@ -7418,10 +7424,10 @@ static int64_t get_row_rounding(ggml_type type) { #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } -inline void ggml_cuda_op_mul_mat_vec_q( +static void ggml_cuda_op_mul_mat_vec_q( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, const cudaStream_t & stream) { + const int64_t src1_padded_row_size, cudaStream_t stream) { GGML_ASSERT(ggml_nrows(src1) == 1); @@ -7471,10 +7477,10 @@ inline void ggml_cuda_op_mul_mat_vec_q( (void) src1_padded_row_size; } -inline void ggml_cuda_op_dequantize_mul_mat_vec( +static void ggml_cuda_op_dequantize_mul_mat_vec( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, const cudaStream_t & stream) { + const int64_t src1_padded_row_size, cudaStream_t stream) { const int64_t ne00 = src0->ne[0]; const int64_t row_diff = row_high - row_low; @@ -7545,10 +7551,10 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( (void) src1_padded_row_size; } -inline void ggml_cuda_op_mul_mat_cublas( +static void ggml_cuda_op_mul_mat_cublas( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, const cudaStream_t & stream) { + const int64_t src1_padded_row_size, cudaStream_t stream) { GGML_ASSERT(src0_dd_i != nullptr); GGML_ASSERT(src1_ddf_i != nullptr); @@ -7637,9 +7643,9 @@ inline void ggml_cuda_op_mul_mat_cublas( (void) src1_padded_row_size; } -inline void ggml_cuda_op_rope( +static void ggml_cuda_op_rope( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * 
dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); @@ -7717,9 +7723,9 @@ inline void ggml_cuda_op_rope( (void) src1_dd; } -inline void ggml_cuda_op_alibi( +static void ggml_cuda_op_alibi( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7748,9 +7754,9 @@ inline void ggml_cuda_op_alibi( (void) src1_dd; } -inline void ggml_cuda_op_im2col( +static void ggml_cuda_op_im2col( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); @@ -7783,10 +7789,9 @@ inline void ggml_cuda_op_im2col( (void) src0_dd; } - -inline void ggml_cuda_op_sum_rows( +static void ggml_cuda_op_sum_rows( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7801,9 +7806,9 @@ inline void ggml_cuda_op_sum_rows( (void) src1_dd; } -inline void ggml_cuda_op_argsort( +static void ggml_cuda_op_argsort( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_I32); @@ -7820,9 +7825,9 @@ inline void ggml_cuda_op_argsort( (void) src1_dd; } -inline void ggml_cuda_op_diag_mask_inf( +static void ggml_cuda_op_diag_mask_inf( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7840,9 +7845,9 @@ inline void ggml_cuda_op_diag_mask_inf( (void) src1_dd; } -inline void ggml_cuda_op_soft_max( +static void ggml_cuda_op_soft_max( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7861,9 +7866,9 @@ inline void ggml_cuda_op_soft_max( (void) dst; } -inline void ggml_cuda_op_scale( +static void ggml_cuda_op_scale( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + 
const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7879,9 +7884,9 @@ inline void ggml_cuda_op_scale( (void) src1_dd; } -inline void ggml_cuda_op_clamp( +static void ggml_cuda_op_clamp( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, - const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -7974,12 +7979,12 @@ static void ggml_cuda_set_peer_access(const int n_tokens) { #ifdef NDEBUG for (int id = 0; id < g_device_count; ++id) { - CUDA_CHECK(ggml_cuda_set_device(id)); + ggml_cuda_set_device(id); CUDA_CHECK(cudaDeviceSynchronize()); } for (int id = 0; id < g_device_count; ++id) { - CUDA_CHECK(ggml_cuda_set_device(id)); + ggml_cuda_set_device(id); for (int id_other = 0; id_other < g_device_count; ++id_other) { if (id == id_other) { @@ -8013,7 +8018,6 @@ static void ggml_cuda_op_mul_mat( const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; const int64_t ne03 = src0->ne[3]; - const int64_t nrows0 = ggml_nrows(src0); const int64_t ne10 = src1->ne[0]; const int64_t ne11 = src1->ne[1]; @@ -8056,27 +8060,29 @@ static void ggml_cuda_op_mul_mat( GGML_ASSERT(!(split && ne03 > 1)); GGML_ASSERT(!(split && ne02 < ne12)); - // dd = data device - char * src0_dd[GGML_CUDA_MAX_DEVICES] = {nullptr}; - float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float - char * src1_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // q8_1 - float * dst_dd[GGML_CUDA_MAX_DEVICES] = {nullptr}; + struct dev_data { + cuda_pool_alloc src0_dd_alloc; + cuda_pool_alloc src1_ddf_alloc; + cuda_pool_alloc src1_ddq_alloc; + cuda_pool_alloc dst_dd_alloc; - // as = actual size - size_t src0_as[GGML_CUDA_MAX_DEVICES] = {0}; - size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0}; - size_t src1_asq[GGML_CUDA_MAX_DEVICES] = {0}; - size_t dst_as[GGML_CUDA_MAX_DEVICES] = {0}; + char * src0_dd = nullptr; + float * src1_ddf = nullptr; // float + char * src1_ddq = nullptr; // q8_1 + float * dst_dd = nullptr; - int64_t row_low[GGML_CUDA_MAX_DEVICES]; - int64_t row_high[GGML_CUDA_MAX_DEVICES]; + int64_t row_low; + int64_t row_high; + }; + + dev_data dev[GGML_CUDA_MAX_DEVICES]; int used_devices = 0; - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { // by default, use all rows - row_low[id] = 0; - row_high[id] = ne01; + dev[id].row_low = 0; + dev[id].row_high = ne01; // for multi GPU, get the row boundaries from tensor split // and round to mul_mat_q tile sizes @@ -8084,23 +8090,23 @@ static void ggml_cuda_op_mul_mat( const int64_t rounding = get_row_rounding(src0->type); if (id != 0) { - row_low[id] = ne01*g_tensor_split[id]; - if (row_low[id] < ne01) { - row_low[id] -= row_low[id] % rounding; + dev[id].row_low = ne01*g_tensor_split[id]; + if (dev[id].row_low < ne01) { + dev[id].row_low -= dev[id].row_low % rounding; } } if (id != g_device_count - 1) { - row_high[id] = ne01*g_tensor_split[id + 1]; - if (row_high[id] < ne01) { - row_high[id] -= row_high[id] % rounding; + dev[id].row_high = ne01*g_tensor_split[id + 1]; + if (dev[id].row_high < ne01) { + dev[id].row_high -= dev[id].row_high % rounding; } } } } - for (int64_t id = 0; id < g_device_count; ++id) { - if ((!split && id != g_main_device) || row_low[id] == row_high[id]) { + 
for (int id = 0; id < g_device_count; ++id) { + if ((!split && id != g_main_device) || dev[id].row_low == dev[id].row_high) { continue; } @@ -8110,42 +8116,41 @@ static void ggml_cuda_op_mul_mat( const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device; ggml_cuda_set_device(id); - const cudaStream_t stream = g_cudaStreams[id][0]; + cudaStream_t stream = g_cudaStreams[id][0]; if (src0_on_device && src0_is_contiguous) { - src0_dd[id] = (char *) src0_extra->data_device[id]; + dev[id].src0_dd = (char *) src0_extra->data_device[id]; } else { - // const size_t size_src0_ddq = split ? (row_high[id]-row_low[id])*ne00 * src0_ts/src0_bs : ggml_nbytes(src0); - src0_dd[id] = (char *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_as[id]); + dev[id].src0_dd = dev[id].src0_dd_alloc.alloc(ggml_nbytes(src0)); } if (src1_on_device && src1_is_contiguous) { - src1_ddf[id] = (float *) src1_extra->data_device[id]; + dev[id].src1_ddf = (float *) src1_extra->data_device[id]; } else { - src1_ddf[id] = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf[id]); + dev[id].src1_ddf = dev[id].src1_ddf_alloc.alloc(ggml_nelements(src1)); } if (convert_src1_to_q8_1) { - src1_ddq[id] = (char *) ggml_cuda_pool_malloc(nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs, &src1_asq[id]); + dev[id].src1_ddq = dev[id].src1_ddq_alloc.alloc(nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs); if (src1_on_device && src1_is_contiguous) { - quantize_row_q8_1_cuda(src1_ddf[id], src1_ddq[id], ne10, nrows1, src1_padded_col_size, stream); + quantize_row_q8_1_cuda(dev[id].src1_ddf, dev[id].src1_ddq, ne10, nrows1, src1_padded_col_size, stream); CUDA_CHECK(cudaGetLastError()); } } if (dst_on_device) { - dst_dd[id] = (float *) dst_extra->data_device[id]; + dev[id].dst_dd = (float *) dst_extra->data_device[id]; } else { - const size_t size_dst_ddf = split ? (row_high[id]-row_low[id])*ne1*sizeof(float) : ggml_nbytes(dst); - dst_dd[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_as[id]); + const size_t size_dst_ddf = split ? (dev[id].row_high - dev[id].row_low)*ne1 : ggml_nelements(dst); + dev[id].dst_dd = dev[id].dst_dd_alloc.alloc(size_dst_ddf); } } // if multiple devices are used they need to wait for the main device // here an event is recorded that signals that the main device has finished calculating the input data if (split && used_devices > 1) { - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device][0], g_cudaStreams[g_main_device][0])); } @@ -8154,17 +8159,17 @@ static void ggml_cuda_op_mul_mat( const int64_t is = split ? (src1_col_0/src1_col_stride) % MAX_STREAMS : 0; const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? 
ne11 - src1_col_0 : src1_col_stride; - for (int64_t id = 0; id < g_device_count; ++id) { - if ((!split && id != g_main_device) || row_low[id] == row_high[id]) { + for (int id = 0; id < g_device_count; ++id) { + if ((!split && id != g_main_device) || dev[id].row_low == dev[id].row_high) { continue; } const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device; const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device; - const int64_t row_diff = row_high[id] - row_low[id]; + const int64_t row_diff = dev[id].row_high - dev[id].row_low; ggml_cuda_set_device(id); - const cudaStream_t stream = g_cudaStreams[id][is]; + cudaStream_t stream = g_cudaStreams[id][is]; // wait for main GPU data if necessary if (split && (id != g_main_device || is != 0)) { @@ -8178,34 +8183,34 @@ static void ggml_cuda_op_mul_mat( const size_t src1_ddq_i_offset = (i0*ne11 + src1_col_0) * src1_padded_col_size*q8_1_ts/q8_1_bs; // for split tensors the data begins at i0 == i0_offset_low - char * src0_dd_i = src0_dd[id] + (i0/i02_divisor) * (ne01*ne00*src0_ts)/src0_bs; - float * src1_ddf_i = src1_ddf[id] + (i0*ne11 + src1_col_0) * ne10; - char * src1_ddq_i = src1_ddq[id] + src1_ddq_i_offset; - float * dst_dd_i = dst_dd[id] + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff); + char * src0_dd_i = dev[id].src0_dd + (i0/i02_divisor) * (ne01*ne00*src0_ts)/src0_bs; + float * src1_ddf_i = dev[id].src1_ddf + (i0*ne11 + src1_col_0) * ne10; + char * src1_ddq_i = dev[id].src1_ddq + src1_ddq_i_offset; + float * dst_dd_i = dev[id].dst_dd + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff); // the main device memory buffer can be on VRAM scratch, with space for all partial results // in that case an offset on dst_ddf_i is needed if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) { - dst_dd_i += row_low[id]; // offset is 0 if no tensor split + dst_dd_i += dev[id].row_low; // offset is 0 if no tensor split } // copy src0, src1 to device if necessary if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) { if (id != g_main_device) { if (convert_src1_to_q8_1) { - char * src1_ddq_i_source = src1_ddq[g_main_device] + src1_ddq_i_offset; - CUDA_CHECK(cudaMemcpyAsync(src1_ddq_i, src1_ddq_i_source, src1_ncols*src1_padded_col_size*q8_1_ts/q8_1_bs, - cudaMemcpyDeviceToDevice, stream)); + char * src1_ddq_i_source = dev[g_main_device].src1_ddq + src1_ddq_i_offset; + CUDA_CHECK(cudaMemcpyPeerAsync(src1_ddq_i, id, src1_ddq_i_source, g_main_device, + src1_ncols*src1_padded_col_size*q8_1_ts/q8_1_bs, stream)); } else { float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device]; src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10; - CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_ncols*ne10*sizeof(float), - cudaMemcpyDeviceToDevice, stream)); + CUDA_CHECK(cudaMemcpyPeerAsync(src1_ddf_i, id, src1_ddf_i_source, g_main_device, + src1_ncols*ne10*sizeof(float), stream)); } } } else if (src1->backend == GGML_BACKEND_CPU || (src1_on_device && !src1_is_contiguous)) { CUDA_CHECK(ggml_cuda_cpy_tensor_2d( - src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream)); + src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream)); } else { GGML_ASSERT(false); } @@ -8216,12 +8221,12 @@ static void ggml_cuda_op_mul_mat( } if (src1_col_0 == 0 && (!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) { - CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, row_low[id], row_high[id], stream)); + 
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, dev[id].row_low, dev[id].row_high, stream)); } // do the computation op(src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i, - row_low[id], row_high[id], src1_ncols, src1_padded_col_size, stream); + dev[id].row_low, dev[id].row_high, src1_ncols, src1_padded_col_size, stream); CUDA_CHECK(cudaGetLastError()); // copy dst to host or other device if necessary @@ -8245,9 +8250,25 @@ static void ggml_cuda_op_mul_mat( // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results. float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); - dhf_dst_i += src1_col_0*ne0 + row_low[id]; - CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), dst_dd_i, row_diff*sizeof(float), - row_diff*sizeof(float), src1_ncols, kind, stream)); + dhf_dst_i += src1_col_0*ne0 + dev[id].row_low; +#if !defined(GGML_USE_HIPBLAS) + if (kind == cudaMemcpyDeviceToDevice) { + // cudaMemcpy2DAsync may fail with copies between vmm pools of different devices + cudaMemcpy3DPeerParms p = {}; + p.dstDevice = g_main_device; + p.dstPtr = make_cudaPitchedPtr(dhf_dst_i, ne0*sizeof(float), row_diff, src1_ncols); + p.srcDevice = id; + p.srcPtr = make_cudaPitchedPtr(dst_dd_i, row_diff*sizeof(float), row_diff, src1_ncols); + p.extent = make_cudaExtent(row_diff*sizeof(float), src1_ncols, 1); + CUDA_CHECK(cudaMemcpy3DPeerAsync(&p, stream)); + } else +#endif + { + CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), + dst_dd_i, row_diff*sizeof(float), + row_diff*sizeof(float), src1_ncols, + kind, stream)); + } } else { float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); @@ -8264,35 +8285,14 @@ static void ggml_cuda_op_mul_mat( } } - for (int64_t id = 0; id < g_device_count; ++id) { - if ((!split && id != g_main_device) || row_low[id] == row_high[id]) { - continue; - } - CUDA_CHECK(ggml_cuda_set_device(id)); - - // free buffers again when done - if (dst_as[id] > 0) { - ggml_cuda_pool_free(dst_dd[id], dst_as[id]); - } - if (src1_asq[id] > 0) { - ggml_cuda_pool_free(src1_ddq[id], src1_asq[id]); - } - if (src1_asf[id] > 0) { - ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); - } - if (src0_as[id] > 0) { - ggml_cuda_pool_free(src0_dd[id], src0_as[id]); - } - } - // main device waits for all other devices to be finished if (split && g_device_count > 1) { int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE; is_max = is_max <= MAX_STREAMS ? 
is_max : MAX_STREAMS; - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); - for (int64_t id = 0; id < g_device_count; ++id) { - if (row_low[id] == row_high[id]) { + ggml_cuda_set_device(g_main_device); + for (int id = 0; id < g_device_count; ++id) { + if (dev[id].row_low == dev[id].row_high) { continue; } for (int64_t is = 0; is < is_max; ++is) { @@ -8302,7 +8302,7 @@ static void ggml_cuda_op_mul_mat( } if (dst->backend == GGML_BACKEND_CPU) { - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); CUDA_CHECK(cudaDeviceSynchronize()); } } @@ -8412,7 +8412,7 @@ static void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tens const int64_t ne12 = src1->ne[2]; - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; @@ -8444,7 +8444,7 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor const int64_t ne12 = src1->ne[2]; - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; @@ -8515,7 +8515,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const int64_t ne1 = ggml_nelements(src1); const int64_t ne = ggml_nelements(dst); - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream)); @@ -8656,7 +8656,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT; int64_t min_compute_capability = INT_MAX; - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? 
g_tensor_split[id + 1] : 1.0f)) { min_compute_capability = g_device_caps[id].cc; } @@ -8799,7 +8799,7 @@ static void ggml_cuda_mul_mat_id_cublas(ggml_tensor * dst) { const int64_t ne1 = ggml_nelements(src1); const int64_t ne = ggml_nelements(dst); - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream)); @@ -8917,7 +8917,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s std::vector ids_host(ggml_nbytes(ids)); - const cudaStream_t stream = g_cudaStreams[g_main_device][0]; + cudaStream_t stream = g_cudaStreams[g_main_device][0]; if (ids->backend == GGML_BACKEND_GPU) { const char * ids_dev = (const char *)((const ggml_tensor_extra_gpu *)ids->extra)->data_device[g_main_device]; @@ -9073,7 +9073,7 @@ static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, gg const int64_t nb11 = src1->nb[1]; const int64_t nb12 = src1->nb[2]; - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; const ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; @@ -9163,7 +9163,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu; memset(extra, 0, sizeof(*extra)); - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { if (backend == GGML_BACKEND_GPU && id != g_main_device) { continue; } @@ -9234,15 +9234,14 @@ void ggml_cuda_free_data(struct ggml_tensor * tensor) { ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { + ggml_cuda_set_device(id); if (extra->data_device[id] != nullptr) { - CUDA_CHECK(ggml_cuda_set_device(id)); CUDA_CHECK(cudaFree(extra->data_device[id])); } for (int64_t is = 0; is < MAX_STREAMS; ++is) { if (extra->events[id][is] != nullptr) { - CUDA_CHECK(ggml_cuda_set_device(id)); CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); } } @@ -9296,7 +9295,7 @@ static void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scra force_inplace; const size_t size = ggml_nbytes(tensor); - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) { ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra; char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; @@ -9373,7 +9372,7 @@ void ggml_cuda_copy_to_device(struct ggml_tensor * tensor) { GGML_ASSERT(ggml_is_contiguous(tensor)); ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; - CUDA_CHECK(ggml_cuda_set_device(g_main_device)); + ggml_cuda_set_device(g_main_device); CUDA_CHECK(cudaMemcpy(extra->data_device[g_main_device], tensor->data, ggml_nbytes(tensor), cudaMemcpyHostToDevice)); } diff --git a/ggml.c b/ggml.c index d24560480..ed56e60a8 100644 --- a/ggml.c +++ b/ggml.c @@ -4041,7 +4041,6 @@ static struct ggml_tensor * ggml_group_norm_impl( result->op = GGML_OP_GROUP_NORM; result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; - result->src[1] = NULL; // TODO: maybe store epsilon here? 
return result;
}

@@ -5541,7 +5540,6 @@ static struct ggml_tensor * ggml_upscale_impl(
result->op_params[0] = scale_factor;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
- result->src[1] = NULL;
return result;
}

@@ -5846,7 +5844,6 @@ struct ggml_tensor * ggml_get_rel_pos(
result->op = GGML_OP_GET_REL_POS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
- result->src[1] = NULL;
return result;
}

diff --git a/llama.cpp b/llama.cpp
index 0b99f1e03..4aa59c4c0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -9519,7 +9519,8 @@ struct llama_context * llama_new_context_with_model(
ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc);
#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
if (model->n_gpu_layers > 0) {
- ggml_cuda_set_scratch_size(alloc_size);
+ // the CPU buffer adds this padding in case the malloc buffer is not aligned, so we need to do the same for the GPU buffer, since we use the same offsets
+ ggml_cuda_set_scratch_size(alloc_size + 64);
LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0);
// calculate total VRAM usage

From f56d6077d0c37e6606ac0a4fa3169de70593acfe Mon Sep 17 00:00:00 2001
From: wonjun Jang
Date: Wed, 27 Dec 2023 17:37:25 +0900
Subject: [PATCH 59/84] Add byte token type when tokenizer.model does not exist (#4641)

* Add byte token type to hf format
* remove unused variable
---
 convert.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/convert.py b/convert.py
index 7a3cd615e..1f0c4f2f4 100755
--- a/convert.py
+++ b/convert.py
@@ -357,6 +357,7 @@ class VocabLoader:
for tok in self.tokenizer.all_special_tokens
}
self.special_ids: set[int] = set(self.tokenizer.all_special_ids)
+ self.reverse_vocab = {id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()}
self.vocab_size_base: int = self.tokenizer.vocab_size
self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_dict)
self.fname_tokenizer: Path = fname_tokenizer
@@ -370,15 +371,13 @@ class VocabLoader:
self.spm = None
def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- tokenizer = self.tokenizer
- reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.get_vocab().items()}
added_tokens_ids = set(self.added_tokens_dict.values())
for i in range(self.vocab_size_base):
if i in added_tokens_ids:
continue
- text = reverse_vocab[i].encode("utf-8")
+ text = self.reverse_vocab[i].encode("utf-8")
yield text, self.get_token_score(i), self.get_token_type(i)
def get_token_type(self, token_id: int) -> gguf.TokenType:
@@ -394,10 +393,13 @@ class VocabLoader:
if self.spm.is_byte(token_id):
toktype = gguf.TokenType.BYTE
else:
+ token = self.reverse_vocab[token_id]
if token_id == self.unk_token_id:
toktype = gguf.TokenType.UNKNOWN
- if token_id in self.special_ids:
+ elif token_id in self.special_ids:
toktype = gguf.TokenType.CONTROL
+ elif len(token) == 6 and token.startswith("<0x") and token.endswith(">"):
+ toktype = gguf.TokenType.BYTE
return toktype

From 951010fa53a0ffe81b7d2e87c4349e0d3cb3d19d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 27 Dec 2023 11:02:13 +0200
Subject: [PATCH 60/84] ggml : fix dot product for ARM (#4630)

ggml-ci
---
 ggml-quants.c | 363 +++-----------------------------------------------
 1 file changed, 22 insertions(+), 341 deletions(-)
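The commit below removes the duplicated non-dotprod fallback paths from every kernel and instead defines a single `vdotq_s32` shim when `__ARM_FEATURE_DOTPROD` is unavailable. As a sketch of why this is safe, the shim is reproduced here with only comments added: the per-lane grouping of the 16 int8 products differs from the hardware `vdot` instruction, but every call site either reduces the lanes immediately with `vaddvq_s32` or applies a uniform per-block scale before a final horizontal sum, so only the lane total matters, and that total is identical.

```c
#include <arm_neon.h>

// Hardware vdot: acc[i] += a[4i]*b[4i] + a[4i+1]*b[4i+1] + a[4i+2]*b[4i+2] + a[4i+3]*b[4i+3]
// The fallback distributes the same 16 products across the four lanes differently,
// but the sum over all lanes is the same, so vaddvq_s32(acc) matches exactly.
inline static int32x4_t vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));  // products 0..7  -> int16
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));  // products 8..15 -> int16

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));  // pairwise-widen, accumulate
}
```

diff --git a/ggml-quants.c b/ggml-quants.c
index a15a24048..05ef8f9b7 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -407,6 +407,18 @@ inline static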
ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { #define ggml_vld1q_s8_x4 vld1q_s8_x4 #endif + +#if !defined(__ARM_FEATURE_DOTPROD) + +inline static int32x4_t vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) { + const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); + const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); + + return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))); +} + +#endif + #endif #if defined(__ARM_NEON) || defined(__wasm_simd128__) @@ -2468,32 +2480,12 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); -#if defined(__ARM_FEATURE_DOTPROD) // dot product into int32x4_t const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#endif } *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -2776,32 +2768,12 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); -#if defined(__ARM_FEATURE_DOTPROD) // dot product into int32x4_t const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), 
vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); -#endif } *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; @@ -2963,32 +2935,12 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); -#if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#endif } *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -3275,32 +3227,12 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); -#if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = 
vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); -#endif } *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; @@ -3550,7 +3482,6 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri const int8x16_t y1_0 = vld1q_s8(y1->qs); const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); -#if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); @@ -3558,26 +3489,6 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - -#else - const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0)); - const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0)); - const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1)); - const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1)); - - const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0)); - const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0)); - const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1)); - const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1)); - - const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1)); - const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3)); - const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1)); - const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#endif } *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -3650,12 +3561,10 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - const uint8x16_t m3 = vdupq_n_u8(0x3); const uint8x16_t m4 = vdupq_n_u8(0xF); -#if defined(__ARM_FEATURE_DOTPROD) - const int32x4_t vzero = vdupq_n_s32(0); -#endif + + const int32x4_t vzero = vdupq_n_s32(0); ggml_int8x16x2_t q2bytes; uint8_t aux[16]; @@ -3663,7 +3572,6 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri float sum = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); @@ -3689,20 +3597,9 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri // We use this macro instead of a function call because for some reason // the code runs 2-3% slower, even if the function is declared inline -#if defined(__ARM_FEATURE_DOTPROD) #define MULTIPLY_ACCUM_WITH_SCALE(index)\ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], 
q8bytes.val[0])) * aux[is+(index)];\ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)]; -#else -#define MULTIPLY_ACCUM_WITH_SCALE(index)\ - {\ - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),\ - vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));\ - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),\ - vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));\ - isum += vaddvq_s16(p1) * aux[is+(index)] + vaddvq_s16(p2) * aux[is+1+(index)];\ - } -#endif #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ @@ -3710,26 +3607,23 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ MULTIPLY_ACCUM_WITH_SCALE((index)); - for (int j = 0; j < QK_K/128; ++j) { - const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); + MULTIPLY_ACCUM_WITH_SCALE(0); SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); is += 8; } - sum += d * isum; + sum += d * isum; } *s = sum; @@ -4043,11 +3937,9 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - const uint8x16_t m3 = vdupq_n_u8(0x3); -#if defined(__ARM_FEATURE_DOTPROD) - const int32x4_t vzero = vdupq_n_s32(0); -#endif + + const int32x4_t vzero = vdupq_n_s32(0); ggml_int8x16x4_t q2bytes; @@ -4081,28 +3973,12 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3)); q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3)); -#if defined(__ARM_FEATURE_DOTPROD) isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0]; isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1]; isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2]; isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3]; -#else - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum1 += vaddvq_s16(p1) * scales[0]; - isum2 += vaddvq_s16(p2) * scales[1]; - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p4 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum1 += vaddvq_s16(p3) * scales[2]; - isum2 += vaddvq_s16(p4) * scales[3]; -#endif sum += d * (isum1 + isum2); - } *s = sum; @@ -4328,9 +4204,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri uint32_t utmp[4]; const uint8x16_t m3b = vdupq_n_u8(0x3); -#ifdef __ARM_FEATURE_DOTPROD const 
int32x4_t vzero = vdupq_n_s32(0); -#endif const uint8x16_t m0 = vdupq_n_u8(1); const uint8x16_t m1 = vshlq_n_u8(m0, 1); @@ -4382,22 +4256,11 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; -#else - int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_1.val[0])), - vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_1.val[0]))); - int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_1.val[1])), - vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_1.val[1]))); - int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_1.val[2])), - vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_1.val[2]))); - int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_1.val[3])), - vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_1.val[3]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3]; -#endif + scale += 4; q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); @@ -4410,22 +4273,11 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; -#else - p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_2.val[0])), - vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_2.val[0]))); - p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_2.val[1])), - vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_2.val[1]))); - p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_2.val[2])), - vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_2.val[2]))); - p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_2.val[3])), - vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_2.val[3]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3]; -#endif + scale += 4; if (j == 0) { @@ -4864,10 +4716,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - -#ifdef __ARM_FEATURE_DOTPROD - const int32x4_t vzero = vdupq_n_s32(0); -#endif + const int32x4_t vzero = vdupq_n_s32(0); const 
uint8x16_t m3b = vdupq_n_u8(0x3); const uint8x16_t mh = vdupq_n_u8(4); @@ -4908,22 +4757,10 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2])); q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1]; isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3]; -#else - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p0) * scales[0] + vaddvq_s16(p1) * scales[2] + vaddvq_s16(p2) * scales[1] + vaddvq_s16(p3) * scales[3]; -#endif sum += d * isum; @@ -5228,11 +5065,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri uint32_t utmp[4]; #ifdef __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); -#ifdef __ARM_FEATURE_DOTPROD const int32x4_t mzero = vdupq_n_s32(0); -#endif ggml_int8x16x2_t q4bytes; ggml_int8x16x2_t q8bytes; @@ -5269,10 +5103,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri int32_t sumi2 = 0; for (int j = 0; j < QK_K/64; ++j) { - const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; -#ifdef __ARM_FEATURE_DOTPROD q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); @@ -5287,26 +5119,6 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); sumi2 += vaddvq_s32(p2) * scales[2*j+1]; -#else - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2*j+0]; - - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); 
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2*j+1]; - -#endif } sumf += d * (sumi1 + sumi2); @@ -5603,12 +5415,9 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); -#ifdef __ARM_FEATURE_DOTPROD const int32x4_t mzero = vdupq_n_s32(0); -#endif float sumf = 0; @@ -5636,7 +5445,6 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); -#ifdef __ARM_FEATURE_DOTPROD q8bytes = ggml_vld1q_s8_x4(q8); q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); @@ -5650,27 +5458,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]); const int32_t sumi2 = vaddvq_s32(p2) * scales[1]; -#else - q8bytes = ggml_vld1q_s8_x4(q8); - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0]; - - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3]))); - int32_t sumi2 = vaddvq_s16(vaddq_s16(p2, p3)) * scales[1]; - -#endif sumf += d * (sumi1 + sumi2); - } *s = sumf - sum_mins; @@ -5875,15 +5663,11 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri uint32_t utmp[4]; - #ifdef __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); const uint8x16_t mone = vdupq_n_u8(1); const uint8x16_t mtwo = vdupq_n_u8(2); -#if defined(__ARM_FEATURE_DOTPROD) const int32x4_t mzero = vdupq_n_s32(0); -#endif ggml_int8x16x4_t q5bytes; @@ -5938,28 +5722,11 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) - sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; -#else - - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = 
vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - sumi += vaddvq_s16(vaddq_s16(p0, p1)) * *scales++; - - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - sumi += vaddvq_s16(vaddq_s16(p2, p3)) * *scales++; -#endif } sumf += d * sumi - dmin * sumi_mins; - } *s = sumf; @@ -6311,12 +6078,9 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); const uint8x16_t mh = vdupq_n_u8(16); -#if defined(__ARM_FEATURE_DOTPROD) const int32x4_t mzero = vdupq_n_s32(0); -#endif ggml_int8x16x4_t q5bytes; ggml_uint8x16x4_t q5h; @@ -6348,32 +6112,12 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2])); q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) - int32_t sumi1 = sc[0] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0])); int32_t sumi2 = sc[1] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1])); int32_t sumi3 = sc[2] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2])); int32_t sumi4 = sc[3] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3])); sumf += d * (sumi1 + sumi2 + sumi3 + sumi4); - -#else - - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - int32_t sumi = sc[0] * vaddvq_s16(p0) + sc[1] * vaddvq_s16(p1); - - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - sumi += sc[2] * vaddvq_s16(p2) + sc[3] * vaddvq_s16(p3); - - sumf += d*sumi; -#endif - } *s = sumf; @@ -6600,13 +6344,10 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - float sum = 0; const uint8x16_t m4b = vdupq_n_u8(0xF); -#if defined(__ARM_FEATURE_DOTPROD) const int32x4_t vzero = vdupq_n_s32(0); -#endif //const int8x16_t m32s = vdupq_n_s8(32); const uint8x16_t mone = vdupq_n_u8(3); @@ -6658,31 +6399,13 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, 
q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + scale += 4; -#else - - int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - scale += 2; - - int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; - scale += 2; -#endif - q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; shifted = vshrq_n_u8(qhbits.val[0], 4); @@ -6703,34 +6426,11 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); -#if defined(__ARM_FEATURE_DOTPROD) - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; scale += 4; - - //for (int l = 0; l < 4; ++l) { - // const int32x4_t p = vdotq_s32(vzero, q6bytes.val[l], q8bytes.val[l]); - // isum += vaddvq_s32(p) * *scale++; - //} -#else - p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - scale += 2; - - p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; - scale += 2; -#endif - } //sum += isum * d_all * y[i].d; sum += d_all * y[i].d * (isum - 32 * isum_mins); @@ -7076,14 +6776,11 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const int nb = n / QK_K; #ifdef __ARM_NEON - float sum = 0; const uint8x16_t m4b = vdupq_n_u8(0xF); const int8x16_t m32s = vdupq_n_s8(32); -#if defined(__ARM_FEATURE_DOTPROD) const int32x4_t vzero = vdupq_n_s32(0); -#endif const uint8x16_t mone = vdupq_n_u8(3); @@ -7119,26 +6816,10 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s); q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s); -#if defined(__ARM_FEATURE_DOTPROD) - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], 
q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; -#else - - int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - - int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3]; -#endif sum += isum * d_all * y[i].d; From b47879b0dda43f2d26415e88b6840295817e552a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 27 Dec 2023 11:15:31 +0200 Subject: [PATCH 61/84] scripts : add sync-ggml-am.sh --- scripts/sync-ggml-am.sh | 131 ++++++++++++++++++++++++++++++++++++++++ scripts/sync-ggml.last | 1 + 2 files changed, 132 insertions(+) create mode 100755 scripts/sync-ggml-am.sh create mode 100644 scripts/sync-ggml.last diff --git a/scripts/sync-ggml-am.sh b/scripts/sync-ggml-am.sh new file mode 100755 index 000000000..83abe3681 --- /dev/null +++ b/scripts/sync-ggml-am.sh @@ -0,0 +1,131 @@ +#!/bin/bash +# +# Synchronize ggml changes to llama.cpp +# +# Usage: +# +# $ cd /path/to/llama.cpp +# $ ./scripts/sync-ggml-am.sh +# + +set -e + +sd=$(dirname $0) +cd $sd/../ + +SRC_LLAMA=$(pwd) +SRC_GGML=$(cd ../ggml; pwd) + +if [ ! -d $SRC_GGML ]; then + echo "ggml not found at $SRC_GGML" + exit 1 +fi + +lc=$(cat $SRC_LLAMA/scripts/sync-ggml.last) +echo "Syncing ggml changes since commit $lc" + +cd $SRC_GGML + +git log --oneline $lc..HEAD + +git format-patch $lc --stdout -- \ + include/ggml/ggml*.h \ + src/ggml*.h \ + src/ggml*.c \ + src/ggml*.cpp \ + src/ggml*.m \ + src/ggml*.metal \ + src/ggml*.cu \ + tests/test-opt.cpp \ + tests/test-grad0.cpp \ + tests/test-quantize-fns.cpp \ + tests/test-quantize-perf.cpp \ + tests/test-backend-ops.cpp \ + > $SRC_LLAMA/ggml-src.patch + +# delete files if empty +if [ ! 
-s $SRC_LLAMA/ggml-src.patch ]; then + rm -v $SRC_LLAMA/ggml-src.patch +fi + +cd $SRC_LLAMA + +if [ -f $SRC_LLAMA/ggml-src.patch ]; then + # replace PR numbers + # + # Subject: some text (#1234) + # Subject: some text (ggml/1234) + cat ggml-src.patch | sed -e 's/^Subject: \(.*\) (#\([0-9]*\))/Subject: \1 (ggml\/\2)/' > ggml-src.patch.tmp + mv ggml-src.patch.tmp ggml-src.patch + + cat ggml-src.patch | sed -e 's/^\(.*\) (#\([0-9]*\))$/\1 (ggml\/\2)/' > ggml-src.patch.tmp + mv ggml-src.patch.tmp ggml-src.patch + + # replace filenames: + # + # src/ggml.c -> ggml.c + # src/ggml-alloc.c -> ggml-alloc.c + # src/ggml-backend-impl.h -> ggml-backend-impl.h + # src/ggml-backend.c -> ggml-backend.c + # src/ggml-cuda.cu -> ggml-cuda.cu + # src/ggml-cuda.h -> ggml-cuda.h + # src/ggml-impl.h -> ggml-impl.h + # src/ggml-metal.h -> ggml-metal.h + # src/ggml-metal.m -> ggml-metal.m + # src/ggml-metal.metal -> ggml-metal.metal + # src/ggml-mpi.h -> ggml-mpi.h + # src/ggml-mpi.c -> ggml-mpi.c + # src/ggml-opencl.cpp -> ggml-opencl.cpp + # src/ggml-opencl.h -> ggml-opencl.h + # src/ggml-quants.c -> ggml-quants.c + # src/ggml-quants.h -> ggml-quants.h + # include/ggml/ggml.h -> ggml.h + # include/ggml/ggml-alloc.h -> ggml-alloc.h + # include/ggml/ggml-backend.h -> ggml-backend.h + # + # tests/test-opt.cpp -> tests/test-opt.cpp + # tests/test-grad0.cpp -> tests/test-grad0.cpp + # tests/test-quantize-fns.cpp -> tests/test-quantize-fns.cpp + # tests/test-quantize-perf.cpp -> tests/test-quantize-perf.cpp + # tests/test-backend-ops.cpp -> tests/test-backend-ops.cpp + + cat ggml-src.patch | sed \ + -e 's/src\/ggml\.c/ggml.c/g' \ + -e 's/src\/ggml-alloc\.c/ggml-alloc.c/g' \ + -e 's/src\/ggml-backend-impl\.h/ggml-backend-impl.h/g' \ + -e 's/src\/ggml-backend\.c/ggml-backend.c/g' \ + -e 's/src\/ggml-cuda\.cu/ggml-cuda.cu/g' \ + -e 's/src\/ggml-cuda\.h/ggml-cuda.h/g' \ + -e 's/src\/ggml-impl\.h/ggml-impl.h/g' \ + -e 's/src\/ggml-metal\.h/ggml-metal.h/g' \ + -e 's/src\/ggml-metal\.m/ggml-metal.m/g' \ + -e 's/src\/ggml-metal\.metal/ggml-metal.metal/g' \ + -e 's/src\/ggml-mpi\.h/ggml-mpi.h/g' \ + -e 's/src\/ggml-mpi\.c/ggml-mpi.c/g' \ + -e 's/src\/ggml-opencl\.cpp/ggml-opencl.cpp/g' \ + -e 's/src\/ggml-opencl\.h/ggml-opencl.h/g' \ + -e 's/src\/ggml-quants\.c/ggml-quants.c/g' \ + -e 's/src\/ggml-quants\.h/ggml-quants.h/g' \ + -e 's/include\/ggml\/ggml\.h/ggml.h/g' \ + -e 's/include\/ggml\/ggml-alloc\.h/ggml-alloc.h/g' \ + -e 's/include\/ggml\/ggml-backend\.h/ggml-backend.h/g' \ + -e 's/tests\/test-opt\.cpp/tests\/test-opt.cpp/g' \ + -e 's/tests\/test-grad0\.cpp/tests\/test-grad0.cpp/g' \ + -e 's/tests\/test-quantize-fns\.cpp/tests\/test-quantize-fns.cpp/g' \ + -e 's/tests\/test-quantize-perf\.cpp/tests\/test-quantize-perf.cpp/g' \ + -e 's/tests\/test-backend-ops\.cpp/tests\/test-backend-ops.cpp/g' \ + > ggml-src.patch.tmp + mv ggml-src.patch.tmp ggml-src.patch + + git am ggml-src.patch + + rm -v $SRC_LLAMA/ggml-src.patch +fi + +# update last commit +cd $SRC_GGML +git log -1 --format=%H > $SRC_LLAMA/scripts/sync-ggml.last + +echo "Done" + +exit 0 diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last new file mode 100644 index 000000000..1ec144116 --- /dev/null +++ b/scripts/sync-ggml.last @@ -0,0 +1 @@ +76e7f47b69e8334384dc718480c496dafbd47999 From 879b690a9e1eb1ab0a29b58236fc76978fb4d902 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Wed, 27 Dec 2023 15:16:55 +0100 Subject: [PATCH 62/84] finetune : fix output formatting in print_params (#4653) This commit fixes the output formatting in the print_params function 
which currently looks like this:

```console
print_params: n_vocab: 32000
print_params: n_ctx: 128
print_params: n_embd: 4096
print_params: n_ff: 11008
print_params: n_head: 32
print_params: n_head_kv: 32
print_params: n_layer: 32
print_params: norm_rms_eps : 0.000010
print_params: rope_freq_base : 10000.000000
print_params: rope_freq_scale : 1.000000
```

With this commit the output will look like this:

```console
print_params: n_vocab : 32000
print_params: n_ctx : 128
print_params: n_embd : 4096
print_params: n_ff : 11008
print_params: n_head : 32
print_params: n_head_kv : 32
print_params: n_layer : 32
print_params: norm_rms_eps : 0.000010
print_params: rope_freq_base : 10000.000000
print_params: rope_freq_scale : 1.000000
```

Signed-off-by: Daniel Bevenius
---
 examples/finetune/finetune.cpp | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 7b1333a9d..e0520f64c 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -196,13 +196,13 @@ static const char * LLM_TENSOR_FFN_DOWN = "blk.%d.ffn_down";
static const char * LLM_TENSOR_FFN_UP = "blk.%d.ffn_up";
static void print_params(struct my_llama_hparams * params) {
- printf("%s: n_vocab: %u\n", __func__, params->n_vocab);
- printf("%s: n_ctx: %u\n", __func__, params->n_ctx);
- printf("%s: n_embd: %u\n", __func__, params->n_embd);
- printf("%s: n_ff: %u\n", __func__, params->n_ff);
- printf("%s: n_head: %u\n", __func__, params->n_head);
- printf("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
- printf("%s: n_layer: %u\n", __func__, params->n_layer);
+ printf("%s: n_vocab : %u\n", __func__, params->n_vocab);
+ printf("%s: n_ctx : %u\n", __func__, params->n_ctx);
+ printf("%s: n_embd : %u\n", __func__, params->n_embd);
+ printf("%s: n_ff : %u\n", __func__, params->n_ff);
+ printf("%s: n_head : %u\n", __func__, params->n_head);
+ printf("%s: n_head_kv : %u\n", __func__, params->n_head_kv);
+ printf("%s: n_layer : %u\n", __func__, params->n_layer);
printf("%s: norm_rms_eps : %f\n", __func__, params->f_norm_rms_eps);
printf("%s: rope_freq_base : %f\n", __func__, params->rope_freq_base);
printf("%s: rope_freq_scale : %f\n", __func__, params->rope_freq_scale);

From f6793491b5af6da75edad34d6f503ef86d31b09f Mon Sep 17 00:00:00 2001
From: "Nam D. Tran" <42194884+namtranase@users.noreply.github.com>
Date: Wed, 27 Dec 2023 22:39:45 +0700
Subject: [PATCH 63/84] llama : add AWQ for llama, llama2, mpt, and mistral models (#4593)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* update: awq support llama-7b model
* update: change order
* update: benchmark results for llama2-7b
* update: mistral 7b v1 benchmark
* update: support 4 models
* fix: Readme
* update: ready for PR
* update: readme
* fix: readme
* update: change order import
* black
* format code
* update: work for bot mpt and awqmpt
* update: readme
* Rename to llm_build_ffn_mpt_awq
* Formatted other files
* Fixed params count
* fix: remove code
* update: more detail for mpt
* fix: readme
* fix: readme
* update: change folder architecture
* fix: common.cpp
* fix: readme
* fix: remove ggml_repeat
* update: cicd
* update: cicd
* update: remove use_awq arg
* update: readme
* llama : adapt plamo to new ffn

ggml-ci
---------
Co-authored-by: Trần Đức Nam
Co-authored-by: Le Hoang Anh
Co-authored-by: Georgi Gerganov
---
 awq-py/README.md | 116 +++++++++++++++
 awq-py/awq/apply_awq.py | 254 +++++++++++++++++++++++++++++++++
 awq-py/requirements.txt | 2 +
 convert-hf-to-gguf.py | 27 +++-
 convert.py | 14 ++
 gguf-py/gguf/constants.py | 3 +
 gguf-py/gguf/tensor_mapping.py | 5 +
 llama.cpp | 27 +++-
 8 files changed, 443 insertions(+), 5 deletions(-)
 create mode 100644 awq-py/README.md
 create mode 100644 awq-py/awq/apply_awq.py
 create mode 100644 awq-py/requirements.txt

diff --git a/awq-py/README.md b/awq-py/README.md
new file mode 100644
index 000000000..59354f4e3
--- /dev/null
+++ b/awq-py/README.md
@@ -0,0 +1,116 @@
+# AWQ: Activation-aware Weight Quantization for LLM - version applied to llama.cpp
+[[Paper](https://arxiv.org/abs/2306.00978)][[Original Repo](https://github.com/mit-han-lab/llm-awq)][[Easy-to-use Repo](https://github.com/casper-hansen/AutoAWQ)]
+
+**Supported models:**
+
+- [X] LLaMA
+- [x] LLaMA 2
+- [X] MPT
+- [X] Mistral AI v0.1
+- [ ] Bloom
+- [ ] Mixtral MoE
+
+**TODO:**
+- [x] Update version to work with both MPT and MPT-AWQ model
+- [ ] Add OPT model
+- [ ] Add Bloom model
+- [ ] Add Mixtral MoE
+- [ ] Support w3, w2
+
+
+## Contents
+
+- [Install](##Install)
+- [Convert](##Convert)
+- [Quantize](##Quantize)
+- [Test](##Test)
+- [Benchmark](##Benchmark)
+- [Results](##Results)
+
+## Install
+Install requirements
+```bash
+pip install -r requirements.txt
+```
+Get the pre-computed AWQ search results for multiple model families, including LLaMA, LLaMA2, MPT, OPT
+```bash
+git clone https://huggingface.co/datasets/mit-han-lab/awq-model-zoo awq_cache
+```
+
+## Convert
+Example for llama model
+```bash
+# For llama7b and llama2 models
+python convert.py models/llama-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/llama_7b_fp16.gguf
+# For mistral and mpt models
+python convert-hf-to-gguf.py models/mpt-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/mpt_7b_fp16.gguf
+```
+
+## Quantize
+```bash
+# We only benchmark and confirm the results on q4_0, q4_1, and q2_k types.
+./quantize models/llama_7b_fp16.gguf models/llama_7b_q4_0.gguf q4_0
+```
+
+## Test
+```bash
+# For all models.
+./build/bin/main -m models/llama_7b_q4_0.gguf -n 128 --prompt "Once upon a time"
+```
+
+## Benchmark
+The perplexity measurements in the table above are done against the `wikitext2` test dataset (https://paperswithcode.com/dataset/wikitext-2), with context length of 512.
+```bash
+# For llama and llama2, and mistral models.
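+# The measurements above use a context length of 512; assuming the usual
+# llama.cpp common flags, this can be pinned explicitly with -c, e.g.:
+#   ./perplexity -m models/llama_7b_q4_0.gguf -f datasets/wikitext-2-raw/wiki.test.raw -c 512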
+./perplexity -m models/llama_7b_q4_0.gguf -f datasets/wikitext-2-raw/wiki.test.raw +``` + +## Results +Results are run on OpenBLAS (CPU) and CuBLAS (GPU) for fair comparison +We use three types of llamacpp quantization methods to work with our version, including q4_0, q4_1, and q2_k + +### Llama 7B (Build with OpenBLAS) + +| Model | Measure | F16 | Q4_0 | Q4_1 | Q2_K | +|-----------:|--------------|-------:|-------:|-------:|-------:| +|Llama 7B | perplexity | 5.9066 | 6.1214 | 6.0643 | 6.5808 | +|Llama 7B | file size | 12.9G | 3.5G | 3.9G | 2.7G | +|Llama 7B | bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | +|AWQ-LLama 7B| perplexity | 5.9175 | 6.0252 | 5.9987 | 6.3692 | +|AWQ-LLama 7B| file size | 12.9G | 3.5G | 3.9G | 2.7G | +|AWQ-LLama 7B| bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | + + +### Llama2 7B (Build with CuBLAS) + +| Model | Measure | F16 | Q4_0 | Q4_1 | Q2_K | +|------------:|--------------|-------:|-------:|-------:|-------:| +|Llama2 7B | perplexity | 5.8664 | 6.0260 | 6.0656 | 6.4496 | +|Llama2 7B | file size | 12.9G | 3.5G | 3.9G | 2.7G | +|Llama2 7B | bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | +|AWQ-LLama2 7B| perplexity | 5.8801 | 6.0054 | 5.9849 | 6.3650 | +|AWQ-LLama2 7B| file size | 12.9G | 3.5G | 3.9G | 2.7G | +|AWQ-LLama2 7B| bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | + + +### Mistral 7B v0.1 (Build with CuBLAS) + +| Model | Measure | F16 | Q4_0 | Q4_1 | Q2_K | +|-------------:|--------------|-------:|-------:|-------:|-------:| +|Mistral 7B | perplexity | 5.6931 | 5.8202 | 5.8268 | 6.1645 | +|Mistral 7B | file size | 14.5G | 4.1G | 4.5G | 3.1G | +|Mistral 7B | bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | +|AWQ-Mistral 7B| perplexity | 5.6934 | 5.8020 | 5.7691 | 6.0426 | +|AWQ-Mistral 7B| file size | 14.5G | 4.1G | 4.5G | 3.1G | +|AWQ-Mistral 7B| bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | + +### MPT 7B (Build with OpenBLAS) + +| Model | Measure | F16 | Q4_0 | Q4_1 | Q2_K | +|---------:|--------------|-------:|-------:|-------:|--------:| +|MPT 7B | perplexity | 8.4369 | 8.7956 | 8.6265 | 11.4913 | +|MPT 7B | file size | 13.7G | 3.9G | 4.3G | 2.8G | +|MPT 7B | bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | +|AWQ-MPT 7B| perplexity | 8.4944 | 8.7053 | 8.6750 | 10.2873| +|AWQ-MPT 7B| file size | 13.7G | 3.9G | 4.3G | 2.8G | +|AWQ-MPT 7B| bits/weight | 16.0 | 4.5 | 5.0 | 2.6 | diff --git a/awq-py/awq/apply_awq.py b/awq-py/awq/apply_awq.py new file mode 100644 index 000000000..11132c5d2 --- /dev/null +++ b/awq-py/awq/apply_awq.py @@ -0,0 +1,254 @@ +""" +Implements the AWQ for llama.cpp use cases. +Original paper: https://arxiv.org/abs/2306.00978 + +This code is based on versions of the AWQ implementation found in the following repositories: +* https://github.com/mit-han-lab/llm-awq +* https://github.com/casper-hansen/AutoAWQ +""" + +import os +import torch +import torch.nn as nn + +from transformers import AutoModelForCausalLM, AutoConfig +from transformers.models.bloom.modeling_bloom import BloomGelu +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from transformers.activations import GELUActivation + + +class ScaledActivation(nn.Module): + """ + ScaledActivation module wraps an existing activation function and applies a + scale factor to its output. + + Args: + module (nn.Module): The activation function to be scaled. + scales (torch.Tensor): A tensor of size (num_features,) containing the initial + scale factors for each feature. + + Returns: + torch.Tensor: The scaled output of the activation function. 
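+
+    Example (an illustrative sketch; `hidden_dim` and `x` are assumed to match
+    the wrapped module's feature size and input shape):
+
+        >>> act = ScaledActivation(nn.GELU(), torch.ones(hidden_dim))
+        >>> y = act(x)  # the GELU output is divided element-wise by the scales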
+ """ + + def __init__(self, module, scales): + super().__init__() + self.act = module + self.scales = nn.Parameter(scales.data) + + def forward(self, x): + return self.act(x) / self.scales.view(1, 1, -1).to(x.device) + + +def set_op_by_name(layer, name, new_module): + """ + Set the new module for given module's name. + + Args: + layer (nn.Module): The layer in which to replace the submodule. + name (str): The path to the submodule to be replaced, using dot notation + to access nested modules. + new_module (nn.Module): The new module to replace the existing one. + """ + levels = name.split(".") + if len(levels) > 1: + mod_ = layer + for l_idx in range(len(levels) - 1): + if levels[l_idx].isdigit(): + mod_ = mod_[int(levels[l_idx])] + else: + mod_ = getattr(mod_, levels[l_idx]) + setattr(mod_, levels[-1], new_module) + else: + setattr(layer, name, new_module) + + +def get_op_by_name(module, op_name): + """ + Retrieves a submodule within a given layer based on its name. + + Args: + module (nn.Module): The layer containing the submodule to find. + op_name (str): The name of the submodule. + + Returns: + nn.Module: The requested submodule found within the given layer. + + Raises: + ValueError: If the specified submodule cannot be found within the layer. + """ + for name, m in module.named_modules(): + if name == op_name: + return m + raise ValueError(f"Cannot find op {op_name} in module {module}") + + +@torch.no_grad() +def scale_ln_fcs(ln, fcs, scales): + """ + Scales the weights of a LayerNorm and a list of fully-connected layers proportionally. + + Args: + ln (nn.LayerNorm): The LayerNorm module to be scaled. + fcs (List[nn.Linear]): A list of fully-connected layers to be scaled. + scales (torch.Tensor): A 1D tensor of size (num_features,). + """ + + if not isinstance(fcs, list): + fcs = [fcs] + + scales = scales.to(ln.weight.device) + + ln.weight.div_(scales) + if hasattr(ln, "bias") and ln.bias is not None: + ln.bias.div_(scales) + + for fc in fcs: + fc.weight.mul_(scales.view(1, -1)) + + for p in ln.parameters(): + assert torch.isnan(p).sum() == 0 + for fc in fcs: + for p in fc.parameters(): + assert torch.isnan(p).sum() == 0 + + +@torch.no_grad() +def scale_fc_fc(fc1, fc2, scales): + """ + Scales the weights of two fully-connected layers in a specific pattern. + + Args: + fc1 (nn.Linear): The first fully-connected layer to be scaled. + fc2 (nn.Linear): The second fully-connected layer to be scaled. + scales (torch.Tensor): A 1D tensor of size (num_features,). + """ + assert isinstance(fc1, nn.Linear) + assert isinstance(fc2, nn.Linear) + + scales = scales.to(fc1.weight.device) + + fc1.weight[-scales.size(0):].div_(scales.view(-1, 1)) + if fc1.bias is not None: + fc1.bias.div_(scales.view(-1)) + + fc2.weight.mul_(scales.view(1, -1)) + + for p in fc1.parameters(): + assert torch.isnan(p).sum() == 0 + for p in fc2.parameters(): + assert torch.isnan(p).sum() == 0 + + +@torch.no_grad() +def scale_gelu_fc(gelu, fc, scales): + """ + Scales the weight of a GELU activation and a fully-connected layer proportionally. + + Args: + gelu (Union[nn.GELU, BloomGelu, GELUActivation]): The GELU activation module to be scaled. + fc (nn.Linear): The fully-connected layer to be scaled. + scales (torch.Tensor): A 1D tensor of size (num_features,). + + Raises: + TypeError: If the `gelu` module is not of type `nn.GELU`, `BloomGelu`, or `GELUActivation`. + TypeError: If the `fc` module is not of type `nn.Linear`. 
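+
+    Example (illustrative; the scale vector length must equal `fc.in_features`):
+
+        >>> scale_gelu_fc(nn.GELU(), nn.Linear(4096, 11008), torch.rand(4096))
+        >>> # fc.weight columns are multiplied in place by the per-feature scales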
+ """ + assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation)) + assert isinstance(fc, nn.Linear) + + fc.weight.mul_(scales.view(1, -1).to(fc.weight.device)) + + for p in fc.parameters(): + assert torch.isnan(p).sum() == 0 + + +def apply_scale(module, scales_list, input_feat_dict=None): + """ + Applies different scaling strategies to layers based on their type and hierarchy within a given module. + + Args: + module (nn.Module): The module containing the layers to be scaled. + scales_list (List[Tuple[str, List[str], torch.Tensor]]): A list of tuples containing: + * prev_op_name (str): The name of the preceding operation or module, + relative to which the layers to be scaled are located. + * layer_names (List[str]): A list of names of the layers to be scaled, relative to the preceding operation. + * scales (torch.Tensor): A 1D tensor of size (num_features,) containing the scaling factors for each feature. + input_feat_dict (Optional[Dict[str, torch.Tensor]]): A dictionary mapping layer names to their corresponding + input features (optional). + """ + for prev_op_name, layer_names, scales in scales_list: + prev_op = get_op_by_name(module, prev_op_name) + layers = [get_op_by_name(module, name) for name in layer_names] + + prev_op.cuda() + for layer in layers: + layer.cuda() + scales.cuda() + + if isinstance(prev_op, nn.Linear): + assert len(layers) == 1 + scale_fc_fc(prev_op, layers[0], scales) + elif isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm)) or "rmsnorm" in str(prev_op.__class__).lower(): + scale_ln_fcs(prev_op, layers, scales) + elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)): + new_module = ScaledActivation(prev_op, scales) + set_op_by_name(module, prev_op_name, new_module) + scale_gelu_fc(prev_op, layers[0], scales) + else: + raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!") + + # apply the scaling to input feat if given; prepare it for clipping + if input_feat_dict is not None: + for layer_name in layer_names: + inp = input_feat_dict[layer_name] + inp.div_(scales.view(1, -1).to(inp.device)) + + prev_op.cpu() + for layer in layers: + layer.cpu() + scales.cpu() + + +@torch.no_grad() +def apply_clip(module, clip_list): + """ + Applies element-wise clipping to the weight of a specific layer within a given module. + + Args: + module (nn.Module): The module containing the layer to be clipped. + clip_list (List[Tuple[str, torch.Tensor]]): A list of tuples containing: + * name (str): The name of the layer to be clipped, relative to the root of the module. + * max_val (torch.Tensor): A 1D or 2D tensor defining the upper bound for each element of the layer's weight. + """ + for name, max_val in clip_list: + layer = get_op_by_name(module, name) + layer.cuda() + max_val = max_val.to(layer.weight.device) + org_shape = layer.weight.shape + layer.weight.data = layer.weight.data.reshape(*max_val.shape[:2], -1) + layer.weight.data = torch.clamp(layer.weight.data, -max_val, max_val) + layer.weight.data = layer.weight.data.reshape(org_shape) + layer.cpu() + + +def add_scale_weights(model_path, scale_path, tmp_path): + """ + Adds pre-computed Activation Weight Quantization (AWQ) results to a model, + including scaling factors and clipping bounds. + + Args: + model_path (str): Path to the pre-trained model to be equipped with AWQ. + scale_path (str): Path to the AWQ scale factors (.pt file). + tmp_path (str): Path to the temporary directory where the equipped model will be saved. 
+ """ + config = AutoConfig.from_pretrained(model_path, trust_remote_code=True) + model = AutoModelForCausalLM.from_pretrained( + model_path, config=config, trust_remote_code=True + ) + model.eval() + awq_results = torch.load(str(scale_path), map_location="cpu") + apply_scale(model, awq_results["scale"]) + apply_clip(model, awq_results["clip"]) + model.save_pretrained(str(tmp_path)) + os.system(f"cp {str(model_path)}/tokenizer* {str(tmp_path)}") diff --git a/awq-py/requirements.txt b/awq-py/requirements.txt new file mode 100644 index 000000000..5fe604329 --- /dev/null +++ b/awq-py/requirements.txt @@ -0,0 +1,2 @@ +torch>=2.0.0 +transformers>=4.32.0 diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 303d08170..7dbc28147 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -46,7 +46,7 @@ class Model: self.part_names = self._get_part_names() self.hparams = Model.load_hparams(self.dir_model) self.model_arch = self._get_model_architecture() - self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess) + self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=False) def set_vocab(self): self._set_vocab_gpt2() @@ -59,7 +59,7 @@ class Model: from safetensors import safe_open ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu")) else: - ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) + ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", weights_only=True)) with ctx as model_part: for name in model_part.keys(): @@ -464,7 +464,11 @@ class MPTModel(Model): data = data_torch.squeeze().numpy() # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if "scales" in name: + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales")) + new_name = new_name.replace("scales", "act.scales") + else: + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) if new_name is None: print(f"Can not map tensor {name!r}") sys.exit() @@ -1095,6 +1099,9 @@ def parse_args() -> argparse.Namespace: "--vocab-only", action="store_true", help="extract only the vocab", ) + parser.add_argument( + "--awq-path", type=Path, default=None, + help="Path to scale awq cache file") parser.add_argument( "--outfile", type=Path, help="path to write to; default: based on input", @@ -1115,6 +1122,20 @@ def parse_args() -> argparse.Namespace: args = parse_args() dir_model = args.model + +if args.awq_path: + sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) + from awq.apply_awq import add_scale_weights + tmp_model_path = args.model / "weighted_model" + dir_model = tmp_model_path + if tmp_model_path.is_dir(): + print(f"{tmp_model_path} exists as a weighted model.") + else: + tmp_model_path.mkdir(parents=True, exist_ok=True) + print("Saving new weighted model ...") + add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path)) + print(f"Saved weighted model at {tmp_model_path}.") + if not dir_model.is_dir(): print(f'Error: {args.model} is not a directory', file=sys.stderr) sys.exit(1) diff --git a/convert.py b/convert.py index 1f0c4f2f4..c3f3fc0a1 100755 --- a/convert.py +++ b/convert.py @@ -1187,6 +1187,7 @@ def main(args_in: list[str] | None = None) -> None: # We currently only support Q8_0 output on little endian systems. 
output_choices.append("q8_0") parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") + parser.add_argument("--awq-path", type=Path, help="Path to scale awq cache file", default=None) parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") @@ -1200,6 +1201,19 @@ def main(args_in: list[str] | None = None) -> None: parser.add_argument("--padvocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides") args = parser.parse_args(args_in) + if args.awq_path: + sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) + from awq.apply_awq import add_scale_weights + tmp_model_path = args.model / "weighted_model" + if tmp_model_path.is_dir(): + print(f"{tmp_model_path} exists as a weighted model.") + else: + tmp_model_path.mkdir(parents=True, exist_ok=True) + print("Saving new weighted model ...") + add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path)) + print(f"Saved weighted model at {tmp_model_path}.") + args.model = tmp_model_path + if args.dump_single: model_plus = lazy_load_file(args.model) do_dump_model(model_plus) diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 4cd87cdda..c9be21119 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -120,6 +120,7 @@ class MODEL_TENSOR(IntEnum): FFN_GATE = auto() FFN_DOWN = auto() FFN_UP = auto() + FFN_ACT = auto() FFN_GATE_EXP = auto() FFN_DOWN_EXP = auto() FFN_UP_EXP = auto() @@ -169,6 +170,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = { MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate", MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", + MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn", MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate.{xid}", MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down.{xid}", MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up.{xid}", @@ -269,6 +271,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_NORM, MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_ACT, ], MODEL_ARCH.GPTJ: [ MODEL_TENSOR.TOKEN_EMBD, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 446c6b688..0b8f70417 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -188,6 +188,11 @@ class TensorNameMap: "model.layers.{bid}.block_sparse_moe.experts.{xid}.w3", # mixtral ), + # AWQ-activation gate + MODEL_TENSOR.FFN_ACT: ( + "transformer.blocks.{bid}.ffn.act", # mpt + ), + # Feed-forward gate MODEL_TENSOR.FFN_GATE: ( "model.layers.{bid}.mlp.gate_proj", # llama-hf refact diff --git a/llama.cpp b/llama.cpp index 4aa59c4c0..bf1b01a90 100644 --- a/llama.cpp +++ b/llama.cpp @@ -354,6 +354,7 @@ enum llm_tensor { LLM_TENSOR_FFN_GATE, LLM_TENSOR_FFN_DOWN, LLM_TENSOR_FFN_UP, + LLM_TENSOR_FFN_ACT, LLM_TENSOR_FFN_DOWN_EXP, LLM_TENSOR_FFN_GATE_EXP, LLM_TENSOR_FFN_UP_EXP, @@ -473,6 +474,7 @@ static std::map> LLM_TENSOR_NAMES = { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" }, }, }, { @@ -1285,6 +1287,7 @@ struct llama_hparams { float f_clamp_kqv; float f_max_alibi_bias; + bool operator!=(const llama_hparams & other) const { if (this->vocab_only 
!= other.vocab_only) return true; if (this->n_vocab != other.n_vocab) return true; @@ -1388,6 +1391,7 @@ struct llama_layer { // ff bias struct ggml_tensor * ffn_down_b; // b2 struct ggml_tensor * ffn_up_b; // b3 + struct ggml_tensor * ffn_act; }; struct llama_kv_cell { @@ -3471,7 +3475,6 @@ static bool llm_load_tensors( case LLM_ARCH_MPT: { model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - // output { ggml_backend_type backend_norm; @@ -3509,6 +3512,9 @@ static bool llm_load_tensors( layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + + // AWQ ScaleActivation layer + layer.ffn_act = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, backend, false); } } break; case LLM_ARCH_STABLELM: @@ -4039,6 +4045,7 @@ static struct ggml_tensor * llm_build_ffn( struct ggml_tensor * gate_b, struct ggml_tensor * down, struct ggml_tensor * down_b, + struct ggml_tensor * act_scales, llm_ffn_op_type type_op, llm_ffn_gate_type type_gate, const llm_build_cb & cb, @@ -4083,6 +4090,10 @@ static struct ggml_tensor * llm_build_ffn( { cur = ggml_gelu(ctx, cur); cb(cur, "ffn_gelu", il); + if (act_scales != NULL) { + cur = ggml_div(ctx, cur, act_scales); + cb(cur, "ffn_act", il); + } } break; case LLM_FFN_RELU: { @@ -4401,6 +4412,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } else { @@ -4580,6 +4592,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } @@ -4694,6 +4707,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, NULL, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); } @@ -4798,6 +4812,7 @@ struct llm_build_context { model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, + NULL, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); } @@ -5002,6 +5017,7 @@ struct llm_build_context { model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, + NULL, LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); } @@ -5088,6 +5104,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } @@ -5183,6 +5200,7 @@ struct llm_build_context { model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, + NULL, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); } @@ -5268,11 +5286,11 @@ struct llm_build_context { NULL, LLM_NORM, cb, il); cb(cur, "ffn_norm", il); - cur = llm_build_ffn(ctx0, cur, model.layers[il].ffn_up, NULL, NULL, NULL, model.layers[il].ffn_down, NULL, + model.layers[il].ffn_act, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); } @@ -5381,6 +5399,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } @@ -5493,6 
+5512,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } @@ -5600,6 +5620,7 @@ struct llm_build_context { model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, + NULL, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); cb(ffn_output, "ffn_out", il); } @@ -5703,6 +5724,7 @@ struct llm_build_context { model.layers[il].ffn_up, NULL, model.layers[il].ffn_gate, NULL, model.layers[il].ffn_down, NULL, + NULL, LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); } @@ -5887,6 +5909,7 @@ static const std::unordered_map k_offload_map { "ffn_gate", OFFLOAD_FUNC }, { "ffn_gate_b", OFFLOAD_FUNC }, { "ffn_gate_par", OFFLOAD_FUNC }, + { "ffn_act", OFFLOAD_FUNC }, { "ffn_down", OFFLOAD_FUNC }, { "ffn_down_b", OFFLOAD_FUNC }, { "ffn_out", OFFLOAD_FUNC }, From ea5497df5d138c83b2b0ca70aefdc4b1175c1001 Mon Sep 17 00:00:00 2001 From: manikbhandari Date: Thu, 28 Dec 2023 09:03:57 -0500 Subject: [PATCH 64/84] gpt2 : Add gpt2 architecture integration (#4555) --- README.md | 1 + convert-hf-to-gguf.py | 66 +++++++++++ gguf-py/gguf/constants.py | 11 +- gguf-py/gguf/tensor_mapping.py | 10 +- llama.cpp | 206 +++++++++++++++++++++++++++++++-- models/ggml-vocab-gpt2.gguf | Bin 0 -> 1766799 bytes tests/CMakeLists.txt | 1 + 7 files changed, 281 insertions(+), 14 deletions(-) create mode 100644 models/ggml-vocab-gpt2.gguf diff --git a/README.md b/README.md index 3b202a336..48dcd6464 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,7 @@ as the main playground for developing new features for the [ggml](https://github - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) - [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral) - [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) +- [x] [GPT-2](https://huggingface.co/gpt2) **Multimodal models:** diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 7dbc28147..3557a825e 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -182,6 +182,8 @@ class Model: return QwenModel if model_architecture == "MixtralForCausalLM": return MixtralModel + if model_architecture == "GPT2LMHeadModel": + return GPT2Model if model_architecture == "PhiForCausalLM": return Phi2Model if model_architecture == "PlamoForCausalLM": @@ -225,6 +227,8 @@ class Model: return gguf.MODEL_ARCH.QWEN if arch == "MixtralForCausalLM": return gguf.MODEL_ARCH.LLAMA + if arch == "GPT2LMHeadModel": + return gguf.MODEL_ARCH.GPT2 if arch == "PhiForCausalLM": return gguf.MODEL_ARCH.PHI2 if arch == "PlamoForCausalLM": @@ -993,6 +997,68 @@ class QwenModel(Model): self.gguf_writer.add_tensor(new_name, data) +class GPT2Model(Model): + def set_gguf_parameters(self): + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_block_count(self.hparams["n_layer"]) + self.gguf_writer.add_context_length(self.hparams["n_ctx"]) + self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) + self.gguf_writer.add_head_count(self.hparams["n_head"]) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + + def write_tensors(self): + block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + for 
name, data_torch in self.get_tensors():
+            # we don't need these
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias")):
+                continue
+
+            if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
+                data_torch = data_torch.transpose(1, 0)
+
+            old_dtype = data_torch.dtype
+
+            # convert any unsupported data types to float32
+            if data_torch.dtype not in (torch.float16, torch.float32):
+                data_torch = data_torch.to(torch.float32)
+
+            data = data_torch.squeeze().numpy()
+
+            # map tensor names
+            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+            if new_name is None:
+                print(f"Can not map tensor {name!r}")
+                sys.exit()
+
+            n_dims = len(data.shape)
+            data_dtype = data.dtype
+
+            # if f32 desired, convert any float16 to float32
+            if self.ftype == 0 and data_dtype == np.float16:
+                data = data.astype(np.float32)
+
+            # TODO: Why can't we use these float16 values as-is? There should be no reason to store float16 as float32
+            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+                data = data.astype(np.float32)
+
+            # if f16 desired, convert any float32 2-dim weight tensors to float16
+            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+                data = data.astype(np.float16)
+
+            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+
+            self.gguf_writer.add_tensor(new_name, data)
+
+            # note: GPT2 output is tied to (same as) wte in original model
+            if new_name == "token_embd.weight":
+                print(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+                self.gguf_writer.add_tensor("output.weight", data)
+
+
 class Phi2Model(Model):
     def set_gguf_parameters(self):
         block_count = self.hparams["n_layer"]
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index c9be21119..ae62cc575 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -370,7 +370,16 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_UP,
     ],
     MODEL_ARCH.GPT2: [
-        # TODO
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.POS_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_QKV,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
     ],
     MODEL_ARCH.PHI2: [
         MODEL_TENSOR.TOKEN_EMBD,
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 0b8f70417..80c1d5449 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -17,6 +17,7 @@ class TensorNameMap:
         "tok_embeddings",                           # llama-pth
         "embeddings.word_embeddings",               # bert
         "language_model.embedding.word_embeddings", # persimmon
+        "wte",                                      # gpt2
         "transformer.embd.wte",                     # phi2
     ),

@@ -34,6 +35,7 @@
     MODEL_TENSOR.POS_EMBD: (
         "transformer.wpe",                # gpt2
         "embeddings.position_embeddings", # bert
+        "wpe",                            # gpt2
     ),

     # Output
@@ -53,7 +55,7 @@
         "norm",                                   # llama-pth
         "embeddings.LayerNorm",                   # bert
         "transformer.norm_f",                     # mpt
-        "ln_f",                                   # refact bloom qwen
+        "ln_f",                                   # refact bloom qwen gpt2
         "language_model.encoder.final_layernorm", # persimmon
         "lm_head.ln",                             # phi2
     ),

@@ -78,6 +80,7 @@
         "encoder.layer.{bid}.attention.output.LayerNorm",      # bert
         "language_model.encoder.layers.{bid}.input_layernorm", # persimmon
         "model.layers.{bid}.ln1",                              # yi
+        "h.{bid}.ln_1",                                        # gpt2
         "transformer.h.{bid}.ln",                              # phi2
         "model.layers.layers.{bid}.norm",                      # plamo
     ),

@@ -95,6 +98,7 @@
"transformer.h.{bid}.self_attention.query_key_value", # falcon "h.{bid}.self_attention.query_key_value", # bloom "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon + "h.{bid}.attn.c_attn", # gpt2 "transformer.h.{bid}.mixer.Wqkv", # phi2 ), @@ -137,6 +141,7 @@ class TensorNameMap: "encoder.layer.{bid}.attention.output.dense", # bert "transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon + "h.{bid}.attn.c_proj", # gpt2 "transformer.h.{bid}.mixer.out_proj", # phi2 "model.layers.layers.{bid}.self_attn.o_proj", # plamo ), @@ -159,6 +164,7 @@ class TensorNameMap: "encoder.layer.{bid}.output.LayerNorm", # bert "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon "model.layers.{bid}.ln2", # yi + "h.{bid}.ln_2", # gpt2 ), MODEL_TENSOR.FFN_GATE_INP: ( @@ -179,6 +185,7 @@ class TensorNameMap: "transformer.h.{bid}.mlp.fc_in", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon "transformer.h.{bid}.mlp.w1", # qwen + "h.{bid}.mlp.c_fc", # gpt2 "transformer.h.{bid}.mlp.fc1", # phi2 "model.layers.layers.{bid}.mlp.up_proj", # plamo ), @@ -218,6 +225,7 @@ class TensorNameMap: "encoder.layer.{bid}.output.dense", # bert "transformer.h.{bid}.mlp.fc_out", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "h.{bid}.mlp.c_proj", # gpt2 "transformer.h.{bid}.mlp.fc2", # phi2 "model.layers.layers.{bid}.mlp.down_proj", # plamo ), diff --git a/llama.cpp b/llama.cpp index bf1b01a90..68c7cced6 100644 --- a/llama.cpp +++ b/llama.cpp @@ -423,6 +423,15 @@ static std::map> LLM_TENSOR_NAMES = LLM_ARCH_GPT2, { { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, }, }, { @@ -1256,6 +1265,10 @@ enum e_model { MODEL_40B, MODEL_65B, MODEL_70B, + MODEL_SMALL, + MODEL_MEDIUM, + MODEL_LARGE, + MODEL_XL, }; static const size_t kiB = 1024; @@ -2552,18 +2565,22 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { static const char * llama_model_type_name(e_model type) { switch (type) { - case MODEL_1B: return "1B"; - case MODEL_3B: return "3B"; - case MODEL_7B: return "7B"; - case MODEL_8B: return "8B"; - case MODEL_13B: return "13B"; - case MODEL_15B: return "15B"; - case MODEL_30B: return "30B"; - case MODEL_34B: return "34B"; - case MODEL_40B: return "40B"; - case MODEL_65B: return "65B"; - case MODEL_70B: return "70B"; - default: return "?B"; + case MODEL_1B: return "1B"; + case MODEL_3B: return "3B"; + case MODEL_7B: return "7B"; + case MODEL_8B: return "8B"; + case MODEL_13B: return "13B"; + case MODEL_15B: return "15B"; + case MODEL_30B: return "30B"; + case MODEL_34B: return "34B"; + case MODEL_40B: return "40B"; + case MODEL_65B: return "65B"; + case MODEL_70B: return "70B"; + case MODEL_SMALL: return "0.1B"; + case MODEL_MEDIUM: return "0.4B"; + case MODEL_LARGE: return "0.8B"; + case MODEL_XL: return "1.5B"; + default: return "?B"; } } @@ -2782,6 +2799,17 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_GPT2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + 
case 12: model.type = e_model::MODEL_SMALL; break; + case 24: model.type = e_model::MODEL_MEDIUM; break; + case 36: model.type = e_model::MODEL_LARGE; break; + case 48: model.type = e_model::MODEL_XL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; default: (void)0; } @@ -3710,6 +3738,60 @@ static bool llm_load_tensors( layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); } } break; + case LLM_ARCH_GPT2: + { + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); + + // output + { + ggml_backend_type backend_norm; + ggml_backend_type backend_output; + + if (n_gpu_layers > int(n_layer)) { + backend_norm = llama_backend_offload; + backend_output = llama_backend_offload_split; + } else { + backend_norm = GGML_BACKEND_CPU; + backend_output = GGML_BACKEND_CPU; + } + + model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); + model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); + model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + } + + const uint32_t n_ff = hparams.n_ff; + + const int i_gpu_start = n_layer - n_gpu_layers; + + model.layers.resize(n_layer); + + for (uint32_t i = 0; i < n_layer; ++i) { + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + + auto & layer = model.layers[i]; + + layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + + layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); + layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + + layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + + layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); + layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -5754,6 +5836,102 @@ struct llm_build_context { return gf; } + + struct ggml_cgraph * build_gpt2() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); + + struct ggml_tensor * cur; + struct ggml_tensor * pos; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); + + // inp_pos - contains the positions + struct 
ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        cb(inp_pos, "inp_pos", -1);
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        cb(KQ_mask, "KQ_mask", -1);
+
+        pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
+        cb(pos, "pos_embd", -1);
+
+        inpL = ggml_add(ctx0, inpL, pos);
+        cb(inpL, "inpL", -1);
+
+        for (int il = 0; il < n_layer; ++il) {
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                    model.layers[il].attn_norm,
+                    model.layers[il].attn_norm_b,
+                    LLM_NORM, cb, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+                cb(cur, "wqkv", il);
+
+                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+                cb(cur, "bqkv", il);
+
+                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+
+                llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+                cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                cb(cur, "kqv_out", il);
+            }
+
+            // add the input
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // FF
+            {
+                cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                        model.layers[il].ffn_norm,
+                        model.layers[il].ffn_norm_b,
+                        LLM_NORM, cb, il);
+                cb(cur, "ffn_norm", il);
+
+                cur = llm_build_ffn(ctx0, cur,
+                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
+                        NULL,                      NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+                        NULL,
+                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+                cb(cur, "ffn_out", il);
+            }
+
+            inpL = ggml_add(ctx0, cur, ffn_inp);
+            cb(inpL, "l_out", il);
+        }
+
+        cur = llm_build_norm(ctx0, inpL, hparams,
+                model.output_norm,
+                model.output_norm_b,
+                LLM_NORM, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        cur = ggml_mul_mat(ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
 };

 //
@@ -6269,6 +6447,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_plamo();
             } break;
+        case LLM_ARCH_GPT2:
+            {
+                result = llm.build_gpt2();
+            } break;
         default:
             GGML_ASSERT(false);
     }
diff --git a/models/ggml-vocab-gpt2.gguf b/models/ggml-vocab-gpt2.gguf
new file mode 100644
index 0000000000000000000000000000000000000000..1fbc72c1e4d9e210e5c5689b31e1debfa33d4b6a
GIT binary patch
literal 1766799
[base85-encoded binary data for the GPT-2 vocab file omitted]
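A note on the QKV split in `build_gpt2` above: the fused `c_attn` output stores Q, K and V contiguously in each row, and the three `ggml_view_2d` calls slice it at byte offsets `0`, `sizeof(float)*n_embd` and `sizeof(float)*(n_embd + n_embd_gqa)`. A minimal standalone sketch of that layout, with illustrative names not taken from the tree:

```cpp
#include <cstddef>

// Sketch: slicing one row of a fused QKV buffer laid out as [ Q | K | V ].
// GPT-2 has no grouped-query attention, so n_embd_gqa == n_embd here.
void split_qkv_row(const float * qkv_row, std::size_t n_embd, std::size_t n_embd_gqa,
                   const float ** q, const float ** k, const float ** v) {
    *q = qkv_row;                        // offset 0,                   length n_embd
    *k = qkv_row + n_embd;               // offset n_embd,              length n_embd_gqa
    *v = qkv_row + n_embd + n_embd_gqa;  // offset n_embd + n_embd_gqa, length n_embd_gqa
}
```

Storing Q, K and V fused this way lets a single matrix multiplication produce all three projections, which is why the graph does one `ggml_mul_mat` against `wqkv` and then takes three views.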
z>ffoWKVMh>Ze9I*b@lJp)qhY||6yJIM|Jfd*VTVgSN~~U{bzOcpV!rYQCI(EUHw;e z^#C^keNo-}qPq7*b?=Mn-WS!qFRFWARQJB9 z?tM|+`=Yw{MRo80x0*rG-}|D!_eFp2i~imh{k<>xdtdbTzUc3L(ck-`zxPFd?~DH4 z7yZ32`g>pW_rB=webL|hqQ70y->&FySM;|l`r8%#?TY?(MSr`Zzg^MauIO)9^tUVe z+ZFxoivD&*f4ic;UD4mJ=x&FySM;|l`r8%#?TY?(MSr`Zzg^MauIO)9^tUVe+ZFxoivD&*f4ic; zUD4mJ=xvZG5*~>$1xocQMVYex6s$`r9!7hi)ViZ|l+NQX{SMt&j2>q-OFM22PFov!rqN&WFue>}@e)bqUDZ5R3IuxT1Si`i|v8o!(6{cfrI>p=rt z>d(WOu6tXHP2xh(>gK08c3-RQYK^m|nFnkGu=zc#fqZGW%&XZfZ(}dy$(6>x+D|uP;xg`Rs0cJ=C}RqKV!1p{8+FBMkPyD}m*T zkL34u+uM1$)>0>X(-$bRQEUNMMj7A9dCrUWdr|3b`zY`CwJUeqJNd`12y42m{qV+y z{~)Ke+kT!eO)mupEOkBcl`dcB<;8TVr>{C=9R=5+X1?1#$&YLbXx-ZI)fE9R`+Rh< z$d3(6L(#G0c~zTjv(kJP%RvacC_cQg1$ImO`e{L?6W4*eXpo_(vp+BFPDPUasus|l zA# zD}w3;^X0TQ1b(Z0@%?Y_emln-HZ|wn_KSQ1CQ&3c?P`1VZ}R~rSBM+LM+ed5c`>bp zEq_tV?^eYxciV{x?>9BIOYO8;23t|22x@XdyX~oM!1KW#l6XF^0Y9`=f0-X#x;nAy z^3S{N*Bb3)W7EDGj3XJo@PF#%S`Y3{k<}HfHMhg?M&n;y*hA=M1?B#MV5DjfGFFyGj+S2f@MvH-V`Q2wYy;)(v)e^Zd37^w|&ql;D7*Q|G2ag3!s zgIZDeo=mj%W77mCMS|19VW$;zC$n04zYuJ~r!J6RT^Q~4oxapx>Y?rgylaH3sbnDv zSuy*~i3szeD~7e(PMc4Bk7FcQ$*Avrpzl4M>w06?(oA|I>q#xGD}m50uQlbXkO_s0 zRSALRwbl`wG^u1#tZf)-&$G_FFKXCrf0X0)=OuW1Tl;WkR>`_(=dBxCzgA<2^$)9h zmMBasifejuLpZ^%@^=!6dj^spblyUkZZ|~-S$?#f+ktS3U-pS ztXmj$Pk4)ItW6VHxE<54YY*+V%W0ttxhajj(+rnVQM+aFv|(xUMg^)Hom7;;nAyrz zHj_$MW$*R#8e`VmtZqNZm18p&JL^}qx#p)Lh;Fm8bzk&l2%$tI8Sl1#QX|S@!)H)W z_c8=f4oXpAFW9Pwxv9UinZUhT6jmm+rl~af?sB1rJD1BRwZVE6k(}^mS@Pkm^o?OS z)gTu`q3%Mu%x?Rk7)=-ZCW)oH(z^t!^UuTEymCcjL2NpwGF_Hc*H29T6ML7O?ANxF zI#U47dLyRY_G6obE6aZfN981U+p|)?M>1u& zicF0CJE36i{zgq%zMJ0NqL(P`f)Mj5L=0%!nklPX+IqgUC9kICEFD$_qm6qQro|(7 z+h5cwUKdn)@t>uC2F@f?kFTr%OuKi2ZD#B{=^ zQZjnC{hnUET`CE>y}_o_8a5}0at*tjn%Z=r{nft{&VxDycQ_DSBu8i_NdADNE=#rN zj{d&doM?o$dH$v_elj(_&@IiD|EduAi$&Qz*01JvI+z5RpBK$9JKGfx5*<@HC>O=j zu*gnxGR^SlO5{VdOaIoDaZISc#6b6Ev`Mf`O83|-xs`xG(bRNdNfHv1B6-y8d>lki z3s+1{e$hxg6F^UTn+>M7PlnPQdh>LX_9?ixl-T^- zZhI)z0d+-xKeOmlU*>*4Eq1hLqDiWPdWdQ|go87FQpT@ot9#E=E+gx^&+9y{dkJJkPdwtveg9io7NiT63N&vcp0E(Y; zR)%(1`|(ecIA{2T(qy`@&9Rax`q4)pX+U5hvuyfCY<4*fhln18)0~=4vsznNb~Y_l zwKGpos6840BYk$N@nOIi9|Hb-QyVVww{sPEQHpnU;$3*YYoWXC)3e+eKlnkuxZ6H6 zF20-^ES4j#ytD`af+lCQaW@GtGE-)4?rDhH1P;bSZ8UV4#5AEl!QmYGGpWBS#Ba_8 zorAVFh1eB3r+!@!qZN^0F$R(4!U@S56L=(Y8_KYJYQR7N)lkiTav#uzg?-QEx9h%1 zFkyrUW^nd*=J^~-uMYLwCUssLd$)yOnpb~0{Ym+|rjj1Kogdt7UoFch>sM~wyLmbT zL&`&k1f^oL&tHrMGeL)dG0QH1Ilt! zukW@$)CwWD{6Vx}veq=uzQu%3C6IeWqeujnT6^;Log58*F1dzvTM~kuT8~dn*`R76 zp}4uJTtGJA&yHHlb}dlD`>4@W&U zCNa4XJ%W=2ByK1Z{H>}gZ}r^DdEw&E%U)~SWzO>MYy^WR&Vk8>A5riF&ifTM02i1y1(BN@SU;}K)%XQEHcOJ#y~U*{AylPrteZTEsOY#RYKQaeeV zNWkv4vuUx&pkMdwFE++Ieu7n5B-zc4ld`jlP24e7KX=OwZZk2>c^3H9Brxd(f<|L2 zTWPo}bsLYvNRI*m>!oQfp&ST}w?EZD@Y3N*Kk?P{CDtYvgG|F=Z3~BdJr9P$%~lGz zx9gz>k`}a(*3*n?ewy2wuwi21gCFJq1h8qH0DHfK>yuV%952T3S7vdv! 
zx-s@}NvL0|K`)d7x7%LXw2w)f_@_#gBJER&$Q5IT7tA6oQ;IoSk-krKh%Jy1cM3v9 zDx3pc*v0XPqBIG=}wdbd< z-6znUR!9!ldWrkxW@L$I123C)m;=w)%(?9I@;+m9l=wcE0}(&Nh%d~hXGX?&P%ycL zlhbZpyBRvYEpTD75@pwKuxuW6rc|qdY@F_;mZ{h&n=K zAS36v+x{8N@qAeSXu#fyb6FhqpOd;#|0%q&i`^ zhHjBx=)*0dXD6dnoP{cnZr@9sDW8pKdvPKXj-{}WmRt0{m<@IPi~Q5KNVzco##4!h z>8zkh$Pj4fWFy6+$l=HQOE8CdU`t#M5tSMJ;hd0U4nxG-C;r+@?vImZHp{|Wgj-72 ziB1+}ULaVtoM_yUUqJD8R(Xb7Mq3n12$en`eH)2Q3ELBM0EW@*+!vfWDU>eWFU?E| ze}MV(PCQ2i6*@nPA={M)iPjwXeRq(YE=yjbT_>bN@Z~5t;GC5%I@8c`SB2r7uR+Gf zd=^8gZtEgxp2koXn7Y|mQeL{hQ-l(N5rHbCbP+kuk3{q)v2y#a zpaLB6_v$wu6XNd1i{f_hg=BCqR!~>g5xL6od98nSB`yE~ zh4V}|Z{N*p2#U3O#TC)U;z1~l7ecoA z6E=urp?B3Rx+H{?Vne&_bA@k#`PrEefUDH@8b)qHa$}zEd1+{hRk;m~9%0!&_s)r z0eYfkb&ba;pZL(Q_|m8>Oh1$Nth1C2JO{QZ@ULiT;Z)Ls&=?Y?{ZFb+_4X(EM*=rO z2Zo$!%~*1k0Xtk2F-8bD2`aL|fMqV^;6>hOGPajKOt@0FX5$Rbx5a*9<15<_7$Wcs zV1zj|Qv#v|G`xs5!UT&}L^+*Dq*U*nBYt)F%eklYV2Dmp$`f{sDsSvdb3>!{=cK5q zH8kKO_&Dz@4JD)sXdS!l``>g>n6kP;^dw$7B@#@=c^g;#+!V-WJB>0bZ$EgT@gwW0 zFbCiNL<%M5Tyr_-VMwyG<)CdAiGEOtY8z$46SDltroctqsU%f5JJW~u@mQq*&PU#) zh)*a=*PN%wmK+sU<~i0+j6BJtGJ?3Ac1Fm!C~HH6_p;onh#A!#V~!WPNU;_mh9&V@ zO@UrqV$E>Yu#@%h3M0+3-d1c{3CfU~=0imN?#)wy3TxCDp2ATu4)oY}mqR4@=7dq( zT!m>axl+;DIW8HF44hqGdVT**vk8;pcH5uJ%h}Y*sPKDw5o4J?qzvWr07RRLXbQ=g z`b};tCgBHrTuDfguhbJ^4`M7|R8HkfV~hlD9v8k<&xEl^7!ibw8&B zt#>8Bp&4-igiHUwR?M`oaGmWNquZ89fcas1PP|8Ikd2M5l;!YU;aSL4)=1t!wxQ1 z0Hux+>nKYq5XaoKnU}Lg$h>fdHu3``B11*S% z%A*CtAzo0ddA(E3I{myc2+0Nyd0(RrbJi0YTBtHa{2)MmP6)l(z3$%$1E< zxc0<@uRPnY?H-pdN0D|Q=!cSbm5+`E;q z!m3^t-y|sE{G1r!VA^U^xJ0j))2Zpb#9fuRcju0FKwF#vIdWx-;VipK)!0N}P4RFR zO}O8~X;5^c0?$@$S%MzrB3ZfaInGPEl1rpJcP0B`T?~jQ8{WrUA;2B6(a^L|`%7~g z*>X=ybvk$^BB99BOT*v)=I{MxE&;8nunIFZNQOYSDPb8}z^>(em|%OZy1U+-U5rRF zT;jOJ@shq8mz5|xH8BjqpX6h$HK_*|Ob!MwyAbcyv)`@ZW%Q7{v{?fUL((bI1GOIiTI#i8t)z^z)1Sy|_y6l3&+ z-Z0JA>qPb3NeP(@G*Jt*pKeKGK62_ssx{na&^nVBqbAyz zOX(DOh%(Ch^Txt&)OCG3Hibt>Y>n{B$Y_Ddl}ZgYY|hUm3HG6KFXV}|k89yC9>-TH zgdJXl3oW+=3Zk(g@bUy;TxdLV63wdeg=NzpqG68mr~^+bI9C_l|U zF@5b4!^O65#4;miRNo;t$jLc;MZz*EAIc$F_6yrDlNlKd8fFpBWv%7cP=f_5@?*!U zVmFCgIz5hv9@+gn>JhW&OObFM9Ym)HLlod*EY3q@fG+MGQu)wyi^E%>)By%v{9p2tg>y$ zx)T6!!?YNQNVg?Ww`{r`m%yr5mXbkVPBEez2`nNgqUN)v8RaeHNsfhyrmzHiVknaD zDakU3u(CnZ+~?f5pqtB?i8)gy51F?@pKOzlzN?`r#Z|t|xU?wB94$DFu@cT(&HcFq zBXx450VUl~3i*CV^7800}YZRql1b^{t#?LP235;63oaub0-rT`w& zP^~;xR*{cNLi|4`ffvSenWp$)-L}P*=`5uqY?Mn3`c;Gj`CBWX*2;hu)=| zfM`p>M@TKhhoIS$>)u-`!P%%*G`vGAZ5`q(;lSm6Atqc>3VXUjx$%hh1tEllOn1I1 zwd!Ix)h-|%q9vV+)nRNbtx7vu9rfztGOVk`V8oHNC?K;bR@6h6LNXa1Q+<&g5Im-= zu{#R*j7t2X%&i;&cRtOH@Zf<4{t8mluk}(BAmC~cCOgu&1$K{K0@xDnUYJK$5alRw zw1}wv#0NX;N{rsUYe4KFds7OWfj=57`g9Wml}JN@GAr@(s9h55_EnMM7;rmILP^Sn z<>zHTScM*0Ug4#KWq}{a|b+>z9LM)6^TZouOVP%?`6sktM2R z@qpddiqQI%X^v2C$FS(c^z!ViZ6XjHgI zxT;WkR&ot5p!wFq5$M0kQylTZsFET=l%J;p4n!mhnX2uES8VSRklhsQM~=oFe8h#EuKd{MDQ8c!_20^I1qv22vqSucT9w@8OXv(9P#T;5ZafF)r z)Eq3QLwQajo{kkG++zi54%Ab+8iOE$xfMT*1P?cBKX(4#qHvELs94s+9O4-S?#4938)#@?|0TT*G$NJ)n$j{4#PC$vM zYwshJVzP0w?Z;t%SuZQE*7Q{A250fq*VrXv~=L_gkuCXsJ zt_^XN=NdfdG-5IVKC4Y1=+8B?lCq?U)aFB*=BmwKSj?y^iOJbUw6M>nH;oftTl17B zcvydQQq)fp7QEp7u}QSv6Chu^61Tvj&=9bdg#jGLB45a@OVg~pq(rEeIX@S5s8SJ` zk`LpE5)R`F((Y)um*YfPUS1;}tu64;)VLXu12xv{!v3Bg z7)=3 zx-g&vx|cvvoK+G;h?leARPy_GHn5s%N>=sDoDDG80PE8k0a!A-*9f~~4G zZ=&3LP0MOyvX-nNRpOH$_}c;=$$l1kI7BxS?&xs^i`KK3ly>B{I4EQ4TE;dBptd<) zG}Ou`j{Eb(mH}Hak0W`sVfX4|-P{w|aFZ9~3gtlXM^<*l79#^BNqmrBeYgE}4SYoF zoqWc78hRJcFFe-|4@B((`cWBNQR!8G7A`;Y^PwfSoSP&yg5aLf9~~G}5Z)>9P4uu# zZt^9y{bnXuX~tH}i@F%=hZWuvxmVCCLk39U#%-G;5@B7cI4|`abcqDjK%K}!QPWDq zgg>eT6Wh4_JoFX3mGtrSQC=N97e&$N2op)aBU7t`eq?57bjeP?1LYEAen@z7-|ZBN6i^-35OE_;s9EkM=Kfz 
zG?hc2eW*0Q@b+Qk%Q28=l7 zR(vhG`G7rX3(XZtgL^GCOr5Ev5rS)gRy^PnYU8B1DgE-+(_Xa@;~ z55D(-UIe<4I+Ik0uWG9LoTXaXLBA!aA`dcwaZ;QK&yGf!(9?0c(GymLF}9(H`R52&3S3Smy;sPu3eIc!XSdU*n15)@0C zNaeoRZU2~U#&4+s4~b3LcQ^AW#1yr1{FUh}Q2?!b_`u9d5&jEzpojl+)F8|IA593+ z351bCRBP>t5$4P(k;w%`@233om5N-WHas7pYl9^UHbVf9V7|mYOUkC`Q4JpY(^JXE zWv5mEU}0o?6PdMY>no~G5+%naPRKX=o&4Y^6nucTmp4c%%_&FLm*8*|Wk3nvz15uW z3=0dgMITQiIgE$e4m9{T0T1J;)`?W`7R&X{6#cwT9qOZyML;@Q*xcCQqNsjp6+UN6iNY8zZ%sawG)} zyIlgS&y5*%yzwu8L!zy)M-%u8hoyeug0gtQDSqU{TNE#vVvE|QEEyaPrStcryqC~$ zRFaG9O+g3P<0{fp>H|cs3bQx}i_c88i3u|njZ*NEQhI){nrriFdQzPI<&iLn?u_xM!LK4?O;%mo6=ThW&|(7Q+L#UYlqo!< zi+Ou{e7MTx3Lkf~*F|cPYzSub2U3k9Uqh>5BDPsW^-~M(KxFO`c91Y0iQ1upY8V1+E7Ef~^xU8(@m(K;R=($q6mFSo0t{D8|x(Jr2h%NIfR{|{GoReRb zFiSAseIRQ2%r?o5Syc<#`}XtM4&^wZGAMx)+OYuiuP_Z^yCnxlWK8t&Sql;5mZ6ec z)5cBrNlo#dM#;o;(>_RYtNI{pdP;KzLKGFx;3sLxLbRmmjxy=UNdl}nohkKoX7+Iq z4rPX#`_omZYMNNMV_Ful=9s*Y5I;g}1 z1a;X(b*-`}fp}(wx}x|?GvAv!ASUNEfmvzNFw)+-_C@kveIW#5(KVrU{8oA~BG$%J zZ{5!gfHG!rstK6~+Eq83)xLkW&oRh#+_lQw?sOle(^?QJ5eIi;KMeR0`wv$?E_uT( zVSNCaU)Ypd{pv!iiKs3iX#Jtp0H{(ayh)=7HGXk?SR%#KsdXUVOlR`WkVXKIUC_79 zZM1R71d}eK{Y>R{e^b1ktyjizziv4Xf_D^>YbKbzU1{a0UG*MHCQ5K=B2<~_qlexl zGrF4DTeM@J8hK%eS;&xzN>%?qJWL{^8BWZ+rQpb>N?L%P-%t)i^d<-+TJK#cY?z%wZWM~9(D*Jn7TSqaPQmJUM7SZ(Sn(V(3~HTz z#t^gZ%lyX+VLekm{#c8K8Cz_L>SSZ)AY#{iIvnCK zJ`HKX?Iilv%j()029@RuD>BX!Ph$hk{a#vG_tq+?_ellboF@9%7hLv%G{I#79}u2e zlub8b=Z0QPBsu*y_?Hb{q}jt^6Triw9OF}$@X`B7qd(^`Sy9&8snuXi--Sn1^FpTS ze3brjQ@#&M@5?9kRBibvw=!rwf=cAP;D{%E70<6+g@N^GY&fb_AR(dn(WEcK`Q9gb zZ#w^dT%9Z-!9i+^mV&2E9;R@Esig%H6kI8VttB!Q6FPDEg9KBcnIT}$`UJv$YBu5* zp{wSSBbBEV|S+g>Z0&au~W36UC9$%zM#a+a+&y<`@?q)6P=RQ45WiIV=L)QSEZrksPs zPWLLZ=MGm)L-|P1t4#|$aIf7XPq1b8Dhc?wh_&~pFcG1@+xqbh&U=Tc6 zBM{C+&gEU~XKSTS=tda4^*}EX2g0W;vOF2e#p<5!N6rGz$kWZ~e0e0A-#52$DM00! zr>G%9i<+|-Gx0E>^5vk94>h6Z3aH~G<6{>4ggXd^3#+4)k5mbA1 zdhI58OQ4?>2o6fS*tn)b-l`vS>}tK?5JIaJP2DwtEbC2BE&gUFZ6Vl|(oA~84o zE44o4Gm>NdJGp}!6`4>Q5RP(NAG{LGaEcY)Te}y%MLWzelM*#(X(owUZ*3ESOL7Yn zU6}346p4m!B||Y5^PzCY7+?G>oW{9CQsf1P@c-4&VZNHbjPedW7&6}O9eKuLtM~DZ zch+*QwFCB9X}MO?hYH@r3k4HYw=)!3!S4-iOY{qGpTVfnUv1fsEi@gX*f~Y{M{ZU8 z1KZFVq7bxmZ&En_6Oiq+$ecpdAh{Qtqd1HsMon2kC<+-1F(cl1OjjoKl*9(UksKbE zJE!ajYM%`eCQYajDXEMY`o0wtiVP^1tmNC^Ze2k4o*!I<9KF2F_uTY$uQgsYR-O0 z5|qpZe)yII3`~obw<>&}4Azq=LNs{`x#UT>R}7W6qTD3k!!eKdFl-OsZXR_ zbb9|(qH7xI3wB4j1gM?)?#C4ahW%M-M=SBqPp^nI3b1tItzD|VwYbMDXDZSdB+s#~ zJOo8wn~?)M=)^I86a@+T9!lR^ageA+=&S3;m&UCSZoIcPI!7yx9#(oe(tQ+ALFIa$ zaCl|}fkx`(gwIEv);_j|q7?APLN!%7sTX%5OFH4#qvweH$QfIf*>u-ZfL983e*B3B zrG<(qTKkf`Qft0O@zuVK)CwXx`U8!a<9G*A0o=*%!ZFd~^al)9{8y$}qJ>&?jnPV% zyyz`2eSa)QO)9Ks&7YdvN;$A2gyq%@-@P4d6;+lAx#-U=$k74b^WTW$kTaAY+Mo2D zE1iYIKYwua!_>+>sQ#emlZK5FLsP^MXdVX$8x0v)iqL_@2q@oTN^a#)tyHQF8{M_N zM>cwxj5@!#Thuyzwtpu!%cBu#_2FpO6FsCBGIQ0#r8KcVN zAWF6@hb#R$YProK7UD8##jKyJ%xN&QSmFfOxM zwbqha+$jl?#xR)v8=*TTtD+HPg@^!Op6SsW>N3scXMQGENYw%Y`wvzuqQ&R3S|}ev z6pv`&35+5?1?3wnB(PDOC(1^h2V79nmiRxI)m)=sFxQL)G;Wf<#=9h-MMX-}z>-qv zyrn~=i5we?#@H)`x|Q7tQ&>SXpwFo+yKizWc>b*cV{{Z7x!zuxizDa<_GcOJ1+cdRNGnYxjt)6|- z(@{;MMhgJXdxa$lWqXQDdPH%;JC?s%8lIW2k%A?Mm`<#xU9RW4kPPA}=+O+z+d+hb zBB6<9rsnI*!8SSbid>PJ3h5@|zfrVTo?BvcqjnA`Bt9F`+E<3KXwDOvzk=2Q>Csj~ ze8wvCtFT3!_5#-VEOPGh)thviTubCslp;^#_k0YJ4I@$mF6Dj&(FW%p#EWwp?9R8eDNO3cFtok(g=f*?=stcRj`iYO>O;5Y-717-Te~)eOJ=1I={^?-Gu2VO z7YU_BL>~p)=ciKx7gPp#${0HQT=a3Uuj`ikt8*MbABzigqop~Wzrv;M1^x{ps0k;=gvQFt0-}$a4a7qrN5oesYy8wRTbn%>{#l+pPWLy{yorsUL6N`9*9zmfiXA6Ga*?UDxH&mGc|K4;sSj^0ePt(BoAm6^e;AZ zwFUPgc9f%FRtw44;J=a#_u*3~jn&$0Y&^vHVx(gc-g+f?My-Pkp>S+lbM)b)^}5Ng z1A!~EBQf5~L~LHM)6&RLTK&L-DEn;^J&_QtIE6SQYVBpZVLCg1)}5F*2rY>>ZR@uK 
zb+sD0Vslf!L#36T7)`9KfD-RD6=dQuVd(|e$>ox$t`vuxP7gWhmMB_MGi>VWfO79+ z4S5akk0N@_2=4(d5Aoa9#wU~=dnDMX@v}o5U^%BK9W+BHiU4T=rVmDwj8MBp(k+eA z**VL^<)hExn4F!OO0%4jdScWR5xKNB2ovPx;}^#{spk35A7uN@%VGcYvwVEI9Q4T} zR;?T&8kM=*H@d!-sStEzdSWgQ!v zgj5;|))*kJHAN*sqCbuRP>+zO#qs?>&tLxK{2HCWY6qh-dK58Se048+u9XUP{}U}^ zpA+ji?V{o@*Y!t#Aav2$Ljs7sQp(T0yFT7I;Ycm!Gg3Q5L#P0F)0FrDmoHTBw$F3` zBErI0>B*FDiwcgX?=<24VK=5Of=C+j>gm^-^ls~2!8m*aykBkGVdar~PJ>h-I*k=a6qqg<;=oxmY0(?gq6t)Z`|e}%LJRU6ia1Rk z98p`Jd}EaIK_e_PT;d1>+mRDe@N!3VwWr99n3zS?QeV7;E!y&hjU^DQ2-S*mDH&&n z+B$GIdekn?km2>5E`m0}7rE6oy)Uff?(uVdZVq`TqgZYhwu8<)+g(lV(OVo^ZxVwP z0URg!VL0%)azi+wC86@Ll(h+mSZJ_;u9iaUNn{}8SmWeH_=!mQ_+qcDh%&E6lB8KA zE=kCSR>$GJK49U!Xs9r%a*D=M;_IRBCYb2=VPcz13hK_LOZS#T80-GLZK_73-(@%LKrB|#MF|CQ^@9SwboJ5+F;`PmDkX7p#pSR!fUs*oMd zOSG08r(eTXjCDrU$g=PYbAFTOwjf(>Dtq>)aZ7W`f{#(}vX$?HMUX4y9SN@IrgOJb zv)Qu;V=<@hky-7*5?d30k zUvXKM+?SpNTO4^O!!fMy&}Koop|6f~Bd}=NYgDeaU8W~EEBURNVKC3jZ|!pMk&uA* zRh{e)f0*#@CzkOY;Ikf-E7*du=Yapr&l6)iKggYfR{?KXf+h+`O1t{xfml4OCF%ym z$B<-e#Ve_RW5|TuN-rPf6qXU7(UUi4iMb+!c5hpK)v`nj zEc0T^(bmj(7_Nfa3eh&a)^ZbNFa)3+MNw4F2%2!y1NO&;6f&b;*-XiHES~X@s3I$8 z0IeC#k|qlQHOBTX600;XD4>W}Zv&3TZ)=iXUn9m~Ia~c{87L$DB zmWo<4Wmxt~^d&MZDhM6h1bM$O?o61Z<{mwPi6@oQm*fbeN2(R0Z$nNTyy!<9eq%@@ z&}OTk8XZ+5QN8SbVq}8oi`EpS6AdGaZ{JARs0K|7i#n%3_}$%Zk-cdKunQxMV1 zt>MYJ&G$35zQ7e-nXpT9Bae?hJXDAb%Pzv1;+VuQW|18xB0CN@y+&?1g5o~k|0Pig zP4UR^)NhnsmXmXUsw~kpigoe17bJyCp1(50e0#`}L_!W^XLpWS`>vwdmX; z$_8=*WYAbkTNq=ab9Bbxa1seMT0Y#yGxi@5&el!Vq)il=O6RtGPNAjDPmsfKi{=!< z{7dfxMO39P{^ve0LKsp(VRlCNuZXD#-1{JghCg;g3vU;A*2C?HjUpcy#mI@;&PdSA zZ?K_tzl<8e1Rnu6vGhnSNq}EbbduN6)x0qHBTW()P>w@$Mve0l6Dw~lhho+btdTa0 z*Eq+Udc3P;@4^L=L&_GPHcN)3KRG z?$j@ja||k-g@~gmUfwb>qRu}T)ecq^%yAnjHrz5!Gze#!fxvu`fbdX_1Svc+nv6r* zo#j1KUJ{PIM9@PcQp$!khv?XE0*9DhixvqYWX=XD)Sa2fpm6#x-=qfVkjCHtN=sPc&eC2{(2^z1#U$)azm>a2>ZqaNiqWK46*UKx<)w+TGBvC_G2*09-V#>P=XDbEZhQAmj({1AHJH3`}JdfQd+X!OnE60EHiTB&y! 
zLP7`ZHR=Dl{*2-zb_|sD_+?j_BX|Wydm(zeaiDFE_Vj7K6$J)msNc5+3>0LVqwe+8 zg0V0@M|3n86(}Z=xt;q5Y?Lv8HIhr-ZQsxYTJZdu=o8wufKU1eCYuK3z!#U*R14`I z`Tc%OOZeZ+FBrklAr~9HwkouZ^-su4Q+f^G)s&)b!^k9Bc)y--rWSPCxssYY(OuM3 zvC`K538W=V@Y-4$1Jm6nY3S1wY(?BboGih?3NV6^w?=$TQ7&?*%tb`fM5}dx;dE zQ?smJSI9Vx)t$mVG*<9ju&+5r>jd|hMb}_&Wle_^$|N;jOJpVXZR3yn2!Q3Y2+vXmqlHq6X0*uL^@Jti&@;ibt~t@0tp?rsC>@p z=_|eVBwS31%|&0f&u;sN^-#oPOrOOst$ERPwP8>IM-?)=sa14SyOM;|jTl4%r*j~fnN9)c{;IehAo+JdCZRDsAs`e=4xYks|r@;i2Ym z(zg>&^%5P!g}M_WR+LUR@nk?fx^M!C-9<+#0koxk zn*Y01`anBo(>-q|K*?(rXC%53LYa!i9^et-cQCIwd!zAQLk%&7xfL$1FVqqFX=Q;B z7PDD^6Dlm1y%zEsBGfO6(@kZiC51@7^<7f02TmTTv(+2)5laFH-d>bWu|2*EDM*Wn z(pxJlZf&}v(}U;^kDJ6d=lxp10s>5$h_lg)7#lyU@Dz>aHF=iIRy1(lbTW4f5NcIY z0~jX$ox6`z_q^=h+Nnn;oR_Obo)HrXqcACkja4cPng!7}v)ZU0C&x_dJtCl9O!M$K znL%$^iY@3a7(=YEl#Ft=gB0yQIp58tg&*MhXJb%a)xtKXWiC@vH`vrLrL-Z5KjhFS zu47-2K`0A5av(x`@kGtV#&#@iII!4+#!hK3Bz$@&?E~OeJOGQdFChfO-&iOGerl5@ zj5{*V!=>;05jNJCglL3N3}#}rI{oS2`|01+#vn>>xC|w{j3_*;LL=45-U>eQ?W;M* z`@a@rCT4%hJos=ElH@fC+oLmJg%epyZn*FTxJPo{5YspmdBjE^Y>?;y>Z)4w<&baq z_;bDa3PoJa0wZZ4G2XB34C|(k>a94B%@tszP=b#`_6)FWi}JOGm$c+o1fOj?UC|3a zGJbM90iv?$B9+!RYefGAzBshNIM)qN7pCL5B_eIwxojo0A(tRgu&tSqPbu@`XZa`i zA89c(@=z%=QVlaRk-e@8I}{AxHt&D)r@y1YjOA^Bm}ZQsfD_O5%gPPJ$gnL5O6dGs z8YU?~)$J&PzXMuVq9$@Qb;?9!`n4hwP3f%ngf$jOE_<%(%h&l_n1mJ!jN~iv!9#S- zq##bC(5i4u;svRV)DNbvpF+MyT#*o3`Gk=QEndUEZ~3_5H8_9VXU#EQ8Y${EykE?B z{#dPnEI7*$C1y!EOzAdpK#hQg&R%+5oZM=SY9j@b-gf%pJ|-yY#6{pf(-!&5Il$69 ze?1!7AxAY9hlXPA=U6w1cmil72W^%X?jy~KlLNf^4qRBuDa911DOwlqEt9QBgK0ch z{md}T31JXjT20Um@lkl4lHmP7Q$VQ&ayXYZUYJU_q^&;h@ParkyR){=(W@l4Pd?U4 zVNns7&;*)mE_#G$F%;{YDI2PN>Dc%0NOCV%m!YT=x;u90Y$ty!D-i_hqIVG@Dj z(W~;2lP6mtyElnY|5>GjS}7DoE?hzFOZHWFdJh$KeApG`+0qk;DoJk%uEt4N$mIwku6bgAjf zsI3Uap-CJq9GX9Zr`9ecBk-6k#aGr#`5Tt##E zR2ChX_sK>%%1ERiPU$Jyq=~$T`%W79U2s$GujSMd9d3%J^0{aQoJ-_@Au30V;)$AU ztO_DhYEct%s?o38n?N)N3?z|@TG~joti3949u5&>meDD|onQpQ2<9hEL|#s*`?z!n zK8gN`5R6`ZH673#1*WDIMB1CbLYj){9~=p&o2OICLQNmxjNR~*)@0E2eMdA;;>xO* zRvV6@TCFe}HT_r1UV>|Xrc{`E0^--3OLIp=qpwag0%~wr`yHv+Mue?DJ(Cg&aj@x> z-j%D>G!9f!EmbECa(qmbkoax}Sa47r2}uoq*vjQ1 zcTHHr`kxKgbh;I<*%t|yn!xFE0zu=;hQa5aUw`9a6?9+GJ5Rs~b0d7wKHf+x+>0zcSdYB) z;lhY+hoKN_Z8$FcV^8RYB&mCI8iTpYpDS1QWMg4`deyZm6ak#yxbTIX){@m-v21+O zA5CXDM09S<#o1cj13|$IEuiMo{AyYUB;~|-8qLErnP}>#UK7++tCqOeq1JsxJ@K4t z=oKCT+wL^yK(*4OoUc=2&JU^>~OALFcn}WJoz#X#$}k_tie4hp_HMJQ!JW4da>C zn2+HN+!ML5A>t6oqVP>r(@6@1QU}Ln_a@rE>6A7Qk{d;X4oF6t2g}YlLWIpMy&-Z6 zc7JpK-<&;xTOg~oeKw6^f&AXfLrsr;5(KGh#;`c zuqeE@2z@(V($(`wd9iZxU>f_v?j#_N6m~xlEi4lok)}M_P?uhy|8V4^BbB!`2uiYw zXkV`6(~tA7B&a+RM(6v`WIp!Ey#y;6C;gk^)1-er&MC#F#w5sBX4PRU8ck|scs+P1 zsrcywp{P5Jws5KMp}uNp@GV#2ivzubmlKU6KlgcyX~LWyErW@gC|;s&#(+DC=%*JVKll(Xn7WfT|Q$}x6h+pIt%40Y`Jzm(Ng&;q_*iQ-_ zaL+OYcH8g&Qo<`-V5W57h>zsQ5#)*gW}i|T6}d0ONpI*Ip` z=6qgwh38^Lr*kH!so&_z_TkITST&$s)mXMY_iU9`Rb)iJ?~OY8CRTg6F-2}1ePO!i zj}(!NL?$odVGq!=apZo)FhsQ%{n3P&#$W~>l=Z~&=8X~IbIwXjP}5()i`!6NNJmND zabz!jO*y7G30rJpyOn~Y{1Q7cAG=A<5mHz_%s!v&&TYWx!CY=S!aK#&pT5@n)VNqS zMp%qR#>5;Fs{JCQW75??s5IBJsb%lBzq&EdMN%tVy@0Z$pRNd_>2${Gs24$hvT3ze z0s7r40_&JbLT0)}9Fqk0gusbX@+T2+)pLlWir$N|u#n@}XZ9V*~%g&$S_Fy-?c*cSCaG&U zD+<7PbUPcBQQSZIW#}b|3;3ajn~&YG)^r=+f;d7^-xCOQLKQ@*(HjNVpOd+)`R%o; z^5$~X^6&oiZ_BKQHAI_#_^^9ie&ory;h(GZ9a9@wJAy_4xg&^}!r&`w88Wpk`c1X4 znCdjrI9Zv`CN!eWof$q6KPip*@k^~1-3a!fw;uNER9OyAaWG|Cl_tivU)2UAb?-%P zCxY*zdVHc3Gt06haOvG?fNFFjN)&Y7A1}gsPP1WlVw~q3I4dD6tDI*nwRMI?BQBQ0 zWC*&rFoS;mM5Hb<1&&&@svw+))PfV}TnlhY?! 
zGyxdeNp~xl!4^Euzf(AB4z}P_pjWA>aWc{^k5K??Qmv&V;Mt3asU-q$W>y7)W6$_n zkOAP(6b~_9OG(3slO9Z(f%=vle3k(C=T_^#8e-%fA{YEhWJ8M&THiOiNTA2z&3R2vRO5$; zF)?8@r`|$9QHSIl1R`0P=c%<0Lx77Q_NrEb2YD@!ATV1{TguZxo^>PaFtwF4=P3GJ z&R5QdNHd9+x+51DElmlRc|`Pcbo0XCT|v!~k`8`N8yu3ej+tLW;=; z7S24`RD5Q){jnx|gxF%jNdn|%ycTLe3{p-9l;g+uSXvw^ZjQ0$2vK6naIGG$Y$0NX z30BxX+&y80LDU&J1j+zmG29M1%IW?-gn?xJmcMIJT03HzkW+E2P+J+96AADRUDn2z+a%nF;6Gb5a@tI%R{ zf<2c|r|G@3iX%rnE}g2dcygG;z1FGRBM}-Mg5h%F1udFI&7dh$(bSG-5@(YE)~ID( zqDo^~IGRSaU+0t{mcPbz^>(62?t^GaltX+@j2Oj0fP-LA!vwg=uqc%+(z$8lmE+)M zE-mDf3d?drON7X31vBQHZ!9-`G33~N`pnCb$;s&((RWL?*9iCo@8j1;nl7fQrMCjT zq{^TFc|FwBXoDIGxGJFOlAtH?p;65)fX8vt>@qzNZ7H3>hY2wb2)4t^%PI+%yC=<~ z)xg{n9+=j}oyeRt8~aAxat#6UDRBRp52PCOG(KF74jV?#Cf!KTi(=$NICgd9`-l%7 z+;oD{8k@kk#&8iD(wPf!_*2dwi~W_zxjb`uJ4$8~AHVS%d39c<2Bti^AX;S+2>O`i zCIJN4hH<6OaF;EFoiDH$QO$GUH&ro3rsw5JQXpFG(ScqxRNa%cNk)PC+EDPb)K0=~Tlio0Dn*plABG6nYqwkH*NwG3V z3~-|XhrrS-w3wYH|9o1!^Mmi!EMMdULmY*hK%~MBbwo7Z5}}($kuP2sL|X-ezTBF> zMdElOcsUqFMcmdzTU6c{Es%_+fzVO9_xLZFq<_+)cHYz0(df z1$lxkfL23xwk&jRz|6xvYzVR@!ns^j{90Bi0XLO3;%~dBsV%1w71L*<$FuFf{TV?e z37;V;$_m|w%N%t3Zcw@RDSI7#2q$UjNH~F+sy-V>gPFI4DjF0>ZBVp=hzloSv}Pz; zL!_O`hZKhu>R$6(nAu1&38=^^a*CF1+;iWve*nW^`Xu~s^ zUOtSI1_y;%&U^2l)=w7x7=ifREPJpX{--t*kN$NTu8Ce_VI>&b`1KUFw> z`XpD0uJR_)^>I0p@b8Zz647g5f-fL92);#$#(hzK>?ZxK`R&_&NZLh9->hj)sMW;D z6QioL!ww5hI|gsTeOt^4R(jqvhW{O6(>UXhGd+P7y;T45;O=XjRd^6Zc9&YVZRf&o+}vAfH}B$)bsNSfv>jRCwnk z!i>Ty2*wk8mVlm%!K#L+q8vLtcn zn5Xg&g=*F|%gAL+v1oqr)t5OA9;1<8HNFyl;$I}7ndn4Y*+*aU zD>%G)eN2CGT#oxdDX2j^HTGq-Uft3{`mhEy2OMx%3%jtm3EFgm5VufyDPQ_|<{km*=WG_;M%l$|N|Fe+0C;q}%}ZP|QnUn`BWON>yxYEb zQq;f!RHo!T1*ff(q%9Yf9DwB?n)MSO&ed@Ilodg=GG`Y?v|y+wnNqHsM3h`eRHk52 z=bhd5XL6$F!~u=dJ@hUwcyWx6v`olv1z20oZjRbpi%m{c(znkr#EKTrV_^c?vr$jd z6PmB+-b3;LG)k-E%4t81A{af18fK_JIL`fXzR}K`zJuV{UpYi17*!yqbz;(Z9N19W zBqsxuM$vsU>MPd{{&KfMgaeK8aSCWm*lyWu*A@ZTJAgTv%=FpIAsR!A?$*p)J-vqc zb*bEVvDoQa<;`&dCcInGhZcR$QrWUc%meCkO~n;aQ1lj6XD~tX?f!hhe{}K(lQ~q+x}vumo!PO!B_N1 z58qhH?nYqBpFDh=Tq+7r+DQWf-7E46uIM>H{W?fnoegh3pF-myq63`#t5w0j@giqyg5z5T9gpe+N zfc;?#MRaXK{27WPfy_-+@>Et9(|9D$^7C@Fe^z;UM+>85>}f39oKXZd1#3FA*;YB* zc)zEJPQ@z4Jj4BLb0oJ!OpQV*ga1BsUYzRDk$`~M5?XE=JK3PmqT95<878?b3T#i` zWM6uRgQ#`YlbEmzR4Zbm?Sd|CGf7-7bC5T9nB$XBdzM_ zO-Y`X7g)QLuwh|<)I5D5ywBztV-n~hqO`V6JO=X+r%C9hVSTLTl_c)?wpA;AnK^AHo{s zgu;pGfOyWAeACejS1&7{qs|1gR&tb>-fe$FccMbv%zW}%-oVv!$;1mQ;rF5vesOwL zS}_i;Vs??2VVwk4*Ow=`;h)!#7zrg(V9K@4XrE}O6NI%V3jAtXJt#3O>}(`Bts;KA zZKM|ypv;9n^^JRGwJwt=cF(DOP&^qS>n@LdW>~46yDdF+@}+n0G`rW{ZQ?6p1L78x z9vgGKxULRTs2Qa{W6dPI(b+5F^6CBj*vA$OA}7YnC7)wrY9=f1)i#G?8fw0}FyA&O zX17{BkrVw=ZZbzi8)T?lk9aOJ8=}kQ^P}$aGakxecS(K$t7RUTtew9dI z4gVhIVvdC9d-ybQNZd+)r#pl8+fBe(GPFsoV7LiQH1=wQNk^=*TJa!j%!3z?v<<`O zhw-@<*Se`I3hmimT6GhB_a;swXEBT*f!Iy~@sMn+x1B^P(6?vcI&L2!6lD{K<`w@l zCLhN@xUg>73~RHJ5jFdBSp~4DpR!s%uE&&*5_ z4iveR{p7wl0)!FR6MoNHY)#*2Ss%Jhh8XVZ{L1lbjnwk#Tzw(Bjz{NOy8*^W(+YOD z(lFbZ0~$R}ER?E=6fi=|Q!WLILyugCbBDZCjRhLnnA&zX!mY4G!Hv%ZK_M8?1;O;o zy`?IWK!Z1jD@Ha+QTlEajO$3lsIc{{jA-B^z7LO~?NjK)0{8;utN7EnbZ|~Vin*=h zpceuJfKzkBr#uc4a^kz@hGc{P>Radl-l6oFn%f-kC3jY~v#&b|MVKK{vT-(xPrPDv zN{b|EVpCZwupO@*;w<8k3vaVHHwB-JkA8?=v(jLo5NFD)D& z$LFz?a*T01AbbF(<>AT^j$rxw+Fw)Z5ISWwJ@w-v2zkBCkJKa#Q|*U?dAZ^sLg&K4C(MG4G<$8_J!%Uufwfij z?D})x)A%&D(cE9o)tM&xSDhsiIEu-=R?#_%>Qs&L_}xOlZRbB=rmw9K45Nnw>l$h4 zb6?@q&=^OHatgfvt=(S`L4O_^L(XKkegBsXpq!08ElV?MW~JX?GzrpiV&b^{EsiNo znIZ`%ccet_Lo4yVW{ zEik+z9F+5kx~0zioZU^UM6WSF!`1C_aG&a8-Y2M!65ZMq?-1i8f58E$@YSz~#S09iXL!ZbtEghrvkS8K6RjYwbb1<58s+SpDnmJMjoudS z6$~$+L^#e}v&e^&R?wWEAp^mDfd_M1H}$8g`HVv9o*W8h?Y%!=)7mDX@}?^@_caK~ 
zfjLIpoi)u8&3n7;=Z~~%%En4>cv(glF(t%?HUm|!`8ta;7DA~OQ8o>CV4c$YH`cO) z0K-k-UE75d&2i6)lcm+-lnaVX906OR0pa};wIiM)zM^5-E5!RXr$^Q^CG-Q``5dmG zUvyEDrDOY{!KC`Mn^eP*zS4(fx!+#KVi7reryu-c~gfp|JZaM z0eYnFZE-BF%C3*=u!;yOP0OkxV>boY(I&4R*!iEh&rsaOj#}(w1jEP@^_-M_4?H=J zIVdNT0>Ote0i}r zAe{3<(dovr`QXwNf{J9CX5mwM@$y)ABbOau!>{pEarn2icN+c|k=a8dfC4FO~j6r;I(g{oJc|6aUT?@SHtK$EwDD@g&E|l2#Z_^GGJ4Jus>B0AoMT`0_;7Ag6{!wI&`=s z%;!fEd^fTh=n2VmsxJoRa3i~ToDTmkj{b+BMMbTLaotw zV@_J!A*h0<2xQk`jtg0rv8k{XYWbTPyG4(mZ}3M5*R24eHq++P=%&>{)NF&5B>?5^j?AcD&gFE!_1tNbdT2hqU9qQY#N@xrp}rozsnnRVw4h5$S=a? zW&0zdoi~ThY3f?2;MCXWg5XRw3MWu1I2h@7rhSn0ODo;{9(zbbkS=4!8{Q=?DM*y+ zYb_~IXr4!kLB^h8xY4i?8NlL^xS=@ZmxnO#a=ccm>E-dQB!LvmvZA{AAX79v6ipKw z9qJM~O%BKoVUw9Lg};@02)U9wg8t0`oo}=_#--^goMLJKedhL4xSp83nBvq@<*1h% z=F%Gd@QI>COqk!*+YgD@CK@p^jDA~_ccXlc8*{Qf3RhMF6lG+%O5)sT^K>+fw1rTI znGbQyP<}R$lh?FpFE|o`g6vA9zIvo>_wBq*(s{-;s-byS&ggik*aT@%cVq&2-2#~{{|Po78s zV2#6dlqVD?SLGZ-1jOOkIsN0uuQkce8@Q82X+|6V{MrKS+0fZEljm6U)wG@3=Mhfu z;fi14v<-Uq>hg|$2Eyl6?f2+2ZZsaj5-lR~*u2t?dSToi>L;+W)Kgqm^Xl0hdT`i| z6d}j1D$N&TP@q2A_d3=Bse|(JdOA~|S2kFyS=`wEy(yqA)I5Kt{S%aziwz;4D~eqf z&H~}$k+&^u_DxHu(*qBOieA2~J&BrQo*-!RGK=Z*3;F+=6JEKy`3vI%3eI2QA6yke z5JMiwCMp)bK~3nX(~GlAf}rP6Zf=VQCbW({VVYA>goB&Y>=Hns{J*2BI)OLgfF9uo zm%`f4_KEWhP2Ia5tbxcO`%7OrMA@QGM(&1%hp6_=Tw$I$@h7h|n}_5vcq$f-e4Lm0 zjtNLyrKVj;awjzV~~%V*FI52N8fP-Ih4fU{VcX(@^1j2F2@48q zsHzhFB4OEB6rnUMsidqZBQl~s!ndI}Uk4H9lavc61pex|{*2R5%R2mA!X6LFz7JP@ z4ZiP{N9Y;RKzNi%hBz6Q>qHu!R*OeA`d#PO(Ff1X&BSiAa5+8^@j{YefPcOh)|9z4 zpW!t~^7`_*I4bH+G_3J>+&-Al*jujpB~TJFgA4olejErNzOZ~aO1UIW_6L2qSLm$O z6Xawb+{yPimZWqhDxp2sGaOQ8Cq84pZQeN!K!k^@Ji(0bD9GxAe#OmA#-bNYsDVpzj# z#lIR&v~aaqgpt#O;#Q!HN>fveIJ)K1fI*ofp8VcA*dVuP4dwolSKdc1`efmxH1mqW z?wRfN1+}Y5U6j@ubxqIf)QC{{bI~E9jLs;hG-i+HPZ-S_EySCI$C^)8JR!@py=WwK zh-ki$lL7CQ-ukk#Q8&xV1A=qk6-#)uKa4wx?7Fgzz(M8QaPF5C^z6^%iBmc)^PUOQ z4Qgar0kNsMRTrEd>F~xn&pAiuw!FqiTcVUOUD+-_di*-+{E?gLuVkcgTDVKl)*Z~f zszl!4|Fyy;nPrKcNY^q~2F-#d#C zp|&Db8DzVwVb4Wpof9U}tT9(U%*#C~tpgK#dy55O+5Nl|-H=tdLN|fpmY?1gd|_?a{41L#sbJ_tIZ7l3ca<*@VJuS!n%B5g z53In_l1bJ2T+4~XFt~NjvLnwyGeD@dgn)!`bdnr#hL+XP_Q6dQmb}Ej;ACNSF-)7d8 z*gD62&$-~~&_hv^qhF7@wFS$mouyTKj!%$27*3+nCw-1iD%d`s8mFO@GHeyIwJ@ba zZfe->wDWU?4*c}8(7W^`@OSt9N`4F^phcEpS>sf+&t0SBU_N7Lh&VwMaE|sc+pl$4 z$PM!3Wde~De)-)Z*S#H1W*RlyO1ln&&HSKGS4)oMu2TSs2uL&9D=`g~niekl`MR>D ze)KpeLAIR0{&{DqIB+LoDPm|Ls%a8$D}5XHaydpM>G-?jI0TOTbG2;0mCm|8=bM-0ozV3 za32tL_w<<&P2r0>H$|VEWAd`JBYuP@ScnL-_0m+N%sMh^Cdv7Y!R^)loKg+=6|hdE zMPp3!SG5jB;cQC$a+-2JibDF>@-wNU7r-L>X%2;GQR`s9`sN{R4D^dW?3TxO=9#us zVZJHxO?S4gG#!+G_oH{c@v{&H;-Ey0?`v*s_IBP|ejanr+z}i$=YHGgkpLoSb8IB& z(C^D=0>6~aGMy7%80N9ny?FtC=4kJcd-lbP{C30_m$f2Q&ynf{)jLGcTU^@j6(h@_-5bS5Cs#n8EvOxfAr_%oNlW%b(1v>D9UMQ_<8+P- zNuJ~qww2`@!VB67&Lac&Rc8k^txzvR*Ed+9Eld6osmg4jDm}_2T zB!*BtoZK9AX8R}3tF{KmEgLA8NvO@l3hv$@0ewAcPQ1zMKAr!=ES?DOM z66G@v@6rMBR3{{F!uge#-8Z$5FeJuG2(dv+dS`2Twt{R?3or3r=TtZMH zZnIY#3k;*uWLj25@u zIpe>4CM*J$=zlv{9tr#;JXSh?@Ze!Cj69W5q707BJGqod@_Vb9p``>3=9?kKRPIWB zrf#6NnO;FrvUxyXjdB-WMY)4E7Cn-XG5LKo^FhZyM(jE7kT|z7C(WZu&Q-uX+G7}!&S*NUl1`%eq?ayIy(bV$PHTNdWic$7hl)A^ck*2{$2}%Vk zXqA|&qCjI~l^s1bJm^&MrZx%t2W`gF)+i#iJxn1yGIk{22>)MjZh|_G;6BJtMwcp6 z9?5Lj7@7W>#ktO>qiKYjfQcD(gp%cD`_ZmUBoP)yZZx_T8Y`z1m+6YgkK^Dty?8j= zSVCu1DhRCDDJatd4k1%cG;vwGrrc9Q!+D&Bj+``FqLWcAk?)!?M$jjc*gT0_k2#yB zh;H2Yv%v#2nCy!R5ue!Y2M=y(VjTH5H8haqHL>O}O{XDcSFzw%2o!+>?cBBEg}Ao6Y9@eF zUBIV=N3PdLuLNB9)J+6S&}pC1S0l_HT4641;6$6eCXLGccI{-Ulu_UR{`Z9)5;CJO zO)G{_T%1NbQ%wLV)SET|@^M$72j2I0)@VOClJraQmxq)n5D?okpB7t!6x^F zF5H_=#k~6T2S~Bn^kziu%a^nyH8{66oL%0GZ{D_7jo~7Rz4_of5={jxq^2;bHAl&u@2uz& 
zQ??a4-9?F3tYmjju4w+?Kz|%Qy95_*-HFxwlC`AZUmxaFPUP_)7p}wOf<01>_s!Cr zDu?*jj78kEuq+9Ix^b8br&Ho91(-oyT!RKrzdu-58Ta-aWTEhB}O1q)DYT8 z(~%SWfVeNxeHrR@PB-|EH&urlAk)KtoL@QcM8=>?S{votjRjc?xhIg6j0L1=)M|z^ zDrtGQy-A6189kW&h^ZTu{^EIf(*A$a-nBXMD>?J?SMa-iY0=)D8F|EMa&F}4X51PL zveAH0Xi~G5EERz2MzH}D20-$a=)W!IL%UOU!y1POgF|DIFGe4e7h zp%-R^MqQ|Xo&Py`^5n_OlbL`j{6g%nR+7K@i~Ik@>@t$8P`8Q>y2u|u&GjejHKxNe zS^sa;;e-gqg3GCAJC-2Lwgo+*B1Ntk;buAdebrP?=W<2Wm#DZ%^Jeo;KdPO> zoM{F}@YSD>seELCs@x&&hWb)iHh-zsJXdQapily#d2PP@2c77F+u48rWi|_>D@^bO zsvC+zJvviK67LCBM5rrY^g3)aiVA9wAfUlF%djB6{ny$r&wF{L6#kx$aCR{gd}7fh z%g9n*jw5yC7=S;Ir`q>yZ_RlWU#!hQbLCPu8*oE0M2K(zWySEMIQnO#k~P)#kMIAiNW(#r59#*I#>#t?gjKbFQdNCk5;-W?8o{egKHG!SUvDsA#%#mb4;U4Ciu2@c2alj`Z*E{$l z)^L5&_|{6;gX~g7h5C+csCHW87^jyZ{Wvwj$^*||qf3#x?W;+lxju?2P{850tNQ_f z%20VlfhBnn!7X7UiG7>MxJ*>XtT|(P5*3umPBpd2;CR^pGZs26(i}eZv6&LR&f%*$ zxnHWeWnS-2OFnNzqhOcoL{0h~{Y?WmrzFW@?%{JH?>G*HIsE zerGE68(!zZz^j~RK|90{p+U3-AIX)47<+VU4T_60(|^I?t$VXh7Vr8tS2|E^``qQg zUPSLDoRKERf{TiL!O+EUY4z;kOF`Q`Qzhx(+OLIE-#ZeDbA{Gu(kj2y+<`X|R8NcBHnQbEFWK_9rFx49pwM#wcjn%#>Gq)AS-c0sNR$!#Xv z2JZP>aK|M%KPRSao6|a{A?Hth1^AW~t!#f-L%?PuXc>R`6q@F)mv_+z{ z@seSEngntIV5E5!@;9`j=-WU0=D#liO7d|16M$T;6Kl#{`<_nf&vJS#eIuc1yckF+ zZ!-10!#yQcIF!?C6a=q9x$s$r0Y?RV7HX6lvfTAmLnx^xy2viqxaFY6yCgrkb`CHg zWDh<6qfAjdlMl?_f&KIcl+z+Th^vYJJ?(a_PoCC<@UPbc^V`eV2Ndw-#aS5gIEwR- z9xsThxO3vUN6*4jyD=(dD-D*Rz|x&UV2M zM@Ei%jto$Z<v@$1S3sTZs=PU>E|jZQhzBLt@5+OAh{ zo|}zS!M80O(vIWs+=7OR$aewU+89P#H!O$Vv+S7Z@p{^+zMp(lvjj;_^Qd^mrEs;T z8!qz8YiyT3iug&=7j}h(8;mLNoA*?CrkwVYg#;#RV7Yi7nh+KVl&9jK(p>_SPj83J z&-yLe!wc!DgJ_%=^=KCn3Mc^J_y&-h`$XHtk)D0<3wVylb|n=Z%+S4dLup zlk~WYb_U$r6x`wLELtBqI0)H;EK->suX0q9&WjB_iwoO78PQ#YyGe`(?_~$jj%J1AuYlE?jlwgefSzg<@P zzJL0}H;01#zOqOsXR31N`{BpaFfWb?GW#p?3QR?8G8ICvxpHobG#BWN-X$VytF{F7 zHSXfL6X_G~n+*MUScc)6k`Cz^+`ekO+i`5!8?=o)0B^|2^eP`3Su&`%oa!CDLs-i1 zkb>UL;jxU*1Mk<8e@Tfd^Cae5zKhI5-0jNTH3ft%!Cjp3#w`Ux-7ff|G0DO*1$%%= z)Se0e($*E zF+;pTTjxM>Oc~Tirlr_L4I-bP_}HH%ME(7$Q(tD@NIe&m*5;`YNs21D(N}gri3ZHA zxZmkxIkK_MWhY!+ik^kSG7zX~h~m#6;#luQ*pSdx7sO#-bNF-26X@L~#@I`XBkGN< zl;R9$$YYAF64x#5z5;KBr;8|l$k4OJ%V2KGTP-icYtPQ|bj<~qyNwLgO4x(1J9KjV zg*R+>ww4YJ=G5VjKllUhlDb)D&a66Ta37gKXn0Wz3vED>3B%n_&}Av36I6PdMM5W` z{nkZsn+bL49m9}Q0S7PPb(r~Dp18l!nGl9i(qns8`fFst0v5Y*v>(+IHb zW7vBN!mFX>g;+UNzDl2tqgfjxRI4whC3tnw28=eUeF}V-wL}wUFa!=WN*+@TrYH{; zxGg=cm0FM#KFn$xBh-|!_vxMD!am0@uz`wBfKPR5hn#($r(9 z36k}KXQkGUjhjexa-ZYu9N<;(q1_@gE#kQ>EDqKe(se@Pn=GPgCIEWmwb@Ew4IK@;W!b*uBEQRRt=Mk%j{QyGj0t$Go+TJ11hm*VWkbdX4yfF_zlvpn2LqwO3 zm6aUtKxawGvB`pqh;wv`uxM~b+ScBpAhc;nL4ibFYpLt`p>%G|>PHVQ_L$@eIZP0Bf5mDbgxQ&dbX5CTzsc*($Ems3|096rwz!N zJFgVAf8S#ba+(K)a*wH3@_fxtG1*SZzr#nsJyXlNfB)%EJ@r*}ZRn_SD)k6O97E0h zCGs}#gmPpq(1Yt;Q?8ia=Z8zWzEk9|v9T1q zE;a`~#P536^cIeI^222orvUI1mO`yjGVX8rdQTvk6rdLSO zx0)}-(!FDV_g#&QXrG#0hVNFDJbRQS@WoKG}_u?vR@0U*Zjvwq-I5`wLab2SWlv8yz|!&X>^N<`~63RgVdP z6&IfL<#qr5mj}+%LiKn^s_<}XC>l;nE|+DrfFuBb z&%cPn^*@S0n+7TsOwUfQc#IF{1`z;nOwaJ9wJ^izB^NgKKpGT3@vtjocn9Zy@+cvn z60zpANh&<_iy|0M1YN~5Jt};tLR|iXkhak-Z{z7xk4tg5;-@!jPPXH)(ax0M(CTt;x(^qG@>xFkN6NFJe`NyeUF!+}ux! 
z04Cv86Rs6I!ojsfe1sD-GL_)ZRVug+!Sw53M#|9F-?;^f)(UwVd#5dOHKOl*&$FfZ zC%08=bwJL|JEMi07@@e6IpnPe<|W+-Crhc!{yEh<&xwp?cAS(-TR7ZG%M(8@oAO3( z+f>%M&(*##;z9BiOF(%CB))8X3jXyPAyT9y1Q!SVY z8}pI9S$ul`Z|?uj-zkcguJ;ix_1E}?U(Mot;7XLMWGxMFpj>P8C1=h82y0w5&pn>p zO<7G)$T2@IHyudgA_(ck37&a zt39xH$3@S_=&Wuiv9nu$=NViJ{=_^&PbZAX)v@NhpsE-ei+^yk){=V*VGPvDH^T`^ zq_RRi+udns59lUehssPc>>WYGdU*Pdc2Kn|R5$4(M|k?vPaatev!!zItw|Ha)Eai7 zRIAu3a^=GnuS5ZhNIVoj@qs7bI8{OzF2fyPW~$6nOoL1%8`bk_MF>)dssS}DB844F zarcD7c%_br{-!7GD!xfo)HE`ts~_Uw7%D)M1R5(5B&wC!dZMIKyR{g;D1(cB*nvT93sOMVmky-UK2c{UJYq+<~B;@ zsmQ!{@p`la&)Nhka|9%y7@i*Q?a_jp!Yu%#QVOFNwT1h)ex`B}Mf!`rez)!H{1%|+ z{h#|9U)_lH^G%>?^>2jqv6=hC$LOob=6qsj$cRQ-42rSeQClvQ`U##M6Y_=BQ4{hZ zNVDcKg}9)SYeHK{tok=WqV`*m=@qANdiB<1M%fts;=B%eUzu_+WGf#+k%TEda4n`Q zN^7WL^zz}psX_V9wP=Cz#i4`0e2=U8ZHSdk^kZ`f@j_I+a1LQVA;vS)=-KQ}Nhkgd zo_duN^&dvy-dY9<*s@A(yj|pcQV9C9c zP8F3W=15(K5((}Rc^3ZiH*enf4dYT@3khu@>l_n+8!+JqKrpyye&MdYm{ws(tx^ZP z9$^D(&r~-j96+!=Zc6kX4~(k*rZ4U3MF%85-|-kYSZa7FqxPphh=e+c&v(5?lv`4r zW&Z)s!}^u=l$|fp!zEa6xS8SN4<#6@33`qT)ib(I{0|jjkW2M@%IHmBw?l%o%PcD- zb(LpJE{qyzkh_kXLY00Ap8i_dM#uT-Q_9^{f6sE0q#l1-1Kq#>tsj}`2+B$VH^ZWEulLTvyVuB6j*=s#xGN1|`MsZfe zLVpi@Vy$uranNOm6;yP=V!}CyP6{!LGDll=MR0Td`8R*P`rrEY$=6;S$OCf#XFW79 zl2QHKv&{`Jiz}DnYK?bL>BW~VGEsnWx753KVo3lBf~@E7T0Q#YQQ(DF1x#3srOF=e zpD>~~XE;0C_whJXpm(}Xm2gjOnyD_@b zaJPj6UDdw~S5qBNuM`!b&%Ufe36z@i22li5$<`qd(-9ITKQ1mK(kyDP|LG5lA~@hf zR;z*2S~3tGBmoJEN97a4bRTYcFKzo-E3cC^{~4IKb~NsRA~WseZRtqhsDz&GXn9|Q z8`b%t=6e8or~a*&cdcj{*Dy|kyEPEO_Jf*anr@0*b48FO%1~zJ znlxM)eMJtUFsWyuc5(Z~PSb=B@!?xviWq=qk+d#F>f+XDM9=0C|HFw(c24l%u`&Yc zVI1KHNA+Pi`^>$B5jjzzy20JA`mRCU{M>uIN|l|RU9>p-#UGd)1NjxSTdsPr`PLJB z2sOrnS+`M(UEBau|9S~EUg4c7eiqbTN>q`8-hlpU4{bRu>O*d&;sqUoS~^hMY&Z-#QxN_KelaB*izBZi2=WvFn1|D0 zuX&@jgyByxHDfqR?}0BoJV{0#2Fy(ZyGhs!7c{=gh^lSD{hetyy>_i%6jG0NjjZq< z%6l?kh~^;Tnm22}e0*GRZk*FYQl9+$s`gNr9K-%h74(|>Ip#^>zD3XiXH5cu9!h0~ z^FZ<7kz(R^e$K(TC7J&I z_I640)*=MTYgBNc+Hn-rTa|xFl-tua{O|)h4?@&LNR5pvXk=ak^sIN0bQ^%W5t9-V z*BK*+sGj_6aUSN(s^USNo=KRokJ$-JCz(k?*Q77ukN?;^0wNiu8}Vbou`xUfrp@8) zJ2M6%=O$|X%J)v2$gksQ(DkyBFob<-`V`ITr0=sv?`X&TW&l&sclP*A`0@a6Ad#=a z6Rbcq_GX-TVnOy19utqSD0rH26R;`-tUa-fs4H-MfXHOXxL=>xnvf*7jC5DpJkBRI#}RXKcpts?O)+M?gR36{CFFXBfoowaHqWFEkQ zN#2E8jMbW-a*cSMn@$gmO*Jv(%mPZy}F(nsj68h1P zJvqu`8{sq347@@ehV-T3oKif_$CVnY(u!MO!V-+eUL?yWLUf10d<`iQip;GUqOmMv zk#YqL1hKi}%O~|Ltv75PuF9&mlv>g}s-8mzXC+Dk6k(`i1%@sLlIXQ*@>u+9xxn2p zI?=h5>2Lm~X}}FD%{{LDhubOJu3WD7SGbxjZ`vF5I`mYuA+)pNI2`zt<}4B{mZ#B% zSC*{uuiiOC6FGEnTLl?Pxg zFXtHJGo0ak15;_=1J<|)WR({!7>+SS3oHko~8?|t(={VaX`t9RT)t}?WgSCL- z@S`x0aG)?D6PCz)U#_4VA`nOt6m9$J-Bj}kTVXOM4KGyUH{3M!?xmvIXx&a<`YM&H9a9BP0^=|5HE-0inq|E?;nUQ@n`**izb{Bmrl`mK8cXqk@4k# zyh;p`a^Phk&sXJv@%__o*;i{{BG+wX2(;-(+u6^Spom=u?kOecH?eM%Qw6 zAX+>nK1!tshVZVDFrqrNNz?iE*GV4WJ&JuDMHWJq;++VtQ3g$?cd&;(1fUJ!Ac3kn#l1G>87yacTUQhC+nSbfSS&e4X<{ah;yqg zbL4o7h5z*7zgnR>H^d&%w}ml#Z{b3(0n}h067UFN@C=DJIwi!Ckc+FbfF40Lf&F3( zq?j=^d*UM!{;FRFvxfWxj|wU$$%2x>_3N>2?_eeGn)b$NyJ|)g*;^|6J6s)-sh->B z=9WKf9PW8=;xaxQTs&D0gLK37au}+q(;Scc5XHq5LP^H+EoY-gpCEDLP{>8tY9oq& z`?QgS3A9&XY^}0*;pnM}PW&u-XZDTr7)IB0PcGpZ>a=<9{=0~@QA zj!cyjv=F{H{1$gdJ);WNp$Uwf&UuGb0X;j5V3b5kemz~sCI0kNPlJ$Q7|P3Q%wEpE z&$$YDu|_jA&yN~`z6_LRG5q9a9xfVXR?knHz3_oYp=nC_%}ttj7%GcM6Fo!^EMEEW zH%x#h*));vb!jzC-_~;|tuDFz2Orp?S4kuIQocMdr?Qv5!$J^-Cer<^J>SR7J{569 z`CJtC7W;15a62PM*;_jSq70mRYO;^O=F zzxah0rMk7*KMHON%bprRu(_jCVbvEP%KIVTh!P;7OoJ_)MP8wj?t#nt<PxJ)9lw*NiFPk>b)U(rD9k=Q}1D{@jo z@x#7!tw$6WBw!j+sB|tS z_qTl;TPx|Y)-_pbjh8$Di{#(Sa>`^FZq8|33|w{cQd{Ofv~d9ET=U^>2_Akn?ug(B znti@%GxuN_a_uE2+j|4A5DQE~pk7QvRohJ6=;{2#KUFABNnK{>k3N1>AJB)49Osf( 
z{I|s^6%0}2OpQ@uZOv{}(KPB``jHdF@hp4+NQ9M+Sv$Bm9dLdV996Kz`>Mlx_=gB% zfRqWXa|8P!L;y&Wn|Rw9dkLODn>L|I02k!ExTeCBwy~&W)Qg3Yc&eCi!_wlKyV46h zqmWqy@uI{Th?N5~?t>67I&*QHb&<~dzy9XW&6AkzaM|h5aZQ$|xB~o%jxrIWrNwej zo5PMnuc(WH9gT0=UQdG~a zj3%GXmFdUX_WY3H^H85BvLZ}17!HXM!4h4@d3jL+0QZ5JdsQGS_47UK`?w4-9@?^^ zM6+(7;AohS%9nL}Kbx2yDw3%P(d&RitV+ucS9PtJOuDt@ovCs}eYtWwnFWp(4HcQ= zm#Z)2*zc^d+u7UNt0@YY5GHaGRILHPzLMqvTJVh4*ZaVIZT6yIOV9cc}1`*E#bCjZYL`8m`kZh{g3L_keMxg61K zf{}Twl)Q8kR#8ZXOz7;-DRk@=pFkDe_J!&co=cZ$J zG%@^fEelBR^143AI~+VJc$pMl&p^mLsN_FfE68BuTE6xk4Wak!Sy5o>vh^rF%|a^= zW=)y^6TT&Xl747Lz`clxR0D#&G{cKW;Lpq+ADhkW`&BDfst@kb;@vcghZ2|}0e$EJ zULJZ0NXcDEt;QOG%DQE zX7I25pk7G<^O9$`ue>|eO(=pmtJ*dqQ_YD>;DS4{$cZ-6kkaeVqcmV#*AK4d+LmAc^NAChkBdJA?$tv1EVXL_FL*VJ$rP404Far`vD!%vWmLQ&WzSA(W5y>Cd?C9XW7E5{pB(?}lxzb1;J5}3t z(_6Y?a0NvufqHntz!`R_j;5QBRb@_%%5e=0Q*N>9D*J3QAGCpm*__z1lHHQqIhN!$uQgpzE8AuVZ>1apnn6-XKAnO*qnjRYTaqhnXiKi z0tIgNG^Z!HzUu9q=i(&))U*psPv%R5;vg(DuE!lV|T{{r~D2H zmY&=uh|4-^dru>-LB*?Pw?_~E)wJar^ko6-BJYi!r+8eiLY1{7j=T?>PMTo#Kld<# z$?2LfW#t~iXN5K&6IQkS8cp38W6Lj>w*VZ1V{jO8C~X%a`MC0zkjAgw#i>sHj$ovN ztl@$)H9brS%P^r3<*PJF?}S21?X6J(=#G)dfNeS)qZ~!p;*#uBX4U6b!shMTLSzPc z*VLTQ!c`)jHKem{vUt%F^wFazIDbS`I4Hd8&8~<^(Ui3LEHSDjrv8CJvlf{Ou9Md! zPUI-bpV9}#6y?RF0^yv-K0@T@b#=T5>jAf&0-gTdE-0e_Gb@o{&GEUt@F^y|zG$0# zPRXEJCNs&7Wdg)ow%|5aTXt^%Y6G73R#WEOe0?Q8-;U>GujfV`rBaDnUkkizZ!p{# z;*o-XS)}I}=YY?yBu!@PapQ(Sd~hWDah61R?_wIM(O0n~JS6g(6+Mt?v*rQN;Iq(K zR^=l@wUjUaRWs|z!}Ru`nIxR|*+bOR4N zBjvv%9x@$!Q`MiPznLnhwZ=u(Dux&Sh|!L5T&b3g$aUT2;Wd7!`R#Z=E~6ICt6I9X z3^gb-y>WTfyAg!+sJVC%?|CQ_SwEeg%%WR?q_(6Gl~OZfLZ<=Ojz?9Fch-bZmS#pd zrxz>ym&v|A{mOUof-9`{Oh!YdWK({aP~+F$R2AWRwWgsB?RVBH08!EH8jXLyowYrw z_@tSIn;S)=2}N)(!(v<$XRcidAzr9B+web_dZQ%7TH3c@; zIUlfZKE7((+>>MD5&*YLLzj%7T$4Als*GJTp$3?3YLcf8dVIqCopjPum#FX8RdT5rYio4G zEs9=6vq5~0>5)b(!lJc$1;1MrkncW%Pe(H#KAAOl3->njTw|E{^+ZvwW(^LUCKIw> zQDI}aHO&{Cj=Kf^>6$oqVIZ^j1z{)-m0b12hx6Ry0H(SYrqT+>2K#oVz4IiWEYvci z7>jLMcZqFti5FdzPv4zWm<2N4Lm%s&k~vhn#I@|H087wm366X|*{nq%so4#?YJ~il zJ%E%w(J2WQcIBgjepGK_1s;kZa=WR%3M23W>{BfS?{01e$({eI`0#4(Lq|%HwAk7P zO4Y>XqNn4F_VjKCytt+W{;6K3;BDN=s!>C!t0Fk&k?v&AU#3h=X7;t|`_OJ|t)z;y zSjg(J4%KqoRL9g5aJE@+Z)?W`B40>kr^<=ffS!wHQtN$;cccDIfU*6J4;8n1mM6`P zDs1J>pBPY7VDsp(-FkYP8r^V~>K(w@0t>i3wNM~z;ax@ldnlpkjlTs_8$6|mTSzc$ML)VJV6k~*G>L;e zUW;JiFuu>@@Zst3t=%!pYakj1lBn@+GS@>RP%}0qv?2zGTtPM#oky3>S}k^C`0|!l zvn(?^l&E@AI(rQvxA2JiXkE^?h5b1~%UbO4!tIktUumwgL)J zQGM8T?RmkCts<`_qo|Et&r!k~d%3;g3{c02U6f9>4YKhWa}y+n69YTV^4k zxP?=yQ^quHsg=`S!7p$7i0w7x`B`YNAK?P3`{endH+*w>*3WM|E*^d(;D`5BrAB9$ zZQy+S@BaS3DJG%vvX`m!elqGET_1>%dKsLLnD?j%7i{?W!voB_NGS8Tro4!})r)we z^hqwxv{aT?5u7}ow^2+<^J)9>Fm6;YqgFA!y)@H~ejV5zo~arP@rRliPS374Lbu7a zO9f10{u09o;Sfxs#P}8!IL2;5JGz;DHDGr&D|EX0C)lR?=gs_WQDtk}oDLc2ia89x z+C@q5mI2D8%FO70`7i4+bdN;{hbObEo9JpT2hu1d7Hp=D$9XDOc)_*$ARtY2y8Vuc zQN?PlT3-yrpDE3Ls_j=%S_wV%B-YE~(!3_r&ImsgUFcX@oQD5{cgK#B6{?7qPug(D z`y*VOQkh(@MwL$NJ$MnK9A+N5Ha$}if;Ao}7dbY)H-k&(+$fp3H(Mbvn;C?F=%cvX zA@Gf7$Mp`B-A{`eyV~3OaR2_}pLWG_TGvt$e`xZm9GhCkj$z{R#p7PQ6Gohv!MpDLe069I$?FU^{8Kg9Os!+q zu5C8$lA_%n{pTJ0QGHqhlcvbZ2bosV;rY9`6tQ|Q&eYP*b5c2G^mAveFDN^F@op!&}@>!=qEKT)C~}g#aw*wp(b{T6#P!XTXJ(D#Wa1tgd|Y=|H)dL*p$F~&+t{{>FXS_ilgzvi6XX=yWkwKQiEkHu=${T0lu z==W*ZV`pRPvd&d6P{O1`?uBm`hJE9Io27-S{ zVA|{7B=!3Al@&c+J<{JX7GuE^f^ETf#4Wm>!-zy^!5)*2X_VwI$`M@mL@qhy=CG|O zr?G+>`^l}FWh*eCbizPfB>)WPKuMkZ_qZ$lTS}9K0<6a5;{cdERHw|+YLK7A2!Bm` z#f*SG#+{Leb?8B<7(Dv97mo_Q7QxtfL+PDhQ2+#4c<2c`?}<}OJcxA^n8_Q0Y<>=m zZ;OXN`Qg9l($2e)yhK;qfHq-*(>y1Ks76c@U+^m=!GER_zi*p+8}fNO<{3xp=+Ay>cV9uO0w{@wzd^-~Q>} z|7rf8XL3Y6g}{cWEMsI=QavFg$1z>3y(ZF|fuMM%2YufO2;4T>w}1Bcf94-i6M>vO 
zv!w~mq^$ZJZ)rL3=PZ+UR4q@c~FRc?4v9%$(2yR zo-|4ds*11D!}IyYKLmqJrmsu+`r^<6mokkChe}oqmG5Qf@+jFah0Zn&Fv*auh6KKLb$fiyKlOv!VHuF7#u=!KX&3Nw72ty} z%qhcE8TmE-TQ?pLTF~w@3B3X7m*Et<0p1ga>SHeNcGc}Y+o^NLtuYNfU7$mv;Bht4 z!XPrzly>gEknOZ=R25Bilj zq!(vlS}P=s*sjfkJ8N=?&6{O7?76&=dG#QXZEphV|60oKW~}kszqtR)Z~hMxmYM>V zf#G6{_fqI9bS&XsmFPL zV3}Fb`UTP4Fs$UO!Kz&km%&dkbZ`VUC%8HMYP8R@Q1w<-)P0VZ0gW5kWf>kU!!=mw zOWxO^RqOl7|nYbX0Ho z<;gr%y-1xfZfb`jto&0M@b>ZTRT5lj$<;^!hk~RJZZwu-Xffr8*Ul&DE)&fNX%9QS?@l5+#@ZU`P zCK|4@pgV>AR6MIFLH|C#dW1e&yc|1OcU06cB{yz5iyd|5fA<<6{mlP4HIW#|b{ zx(D7meC~xmm|uy5LHZ3qn)e22y_DBrnORXgeKrMy1W zYKt7V9|4b_)c)SSyNE_cPw944-*9{PzcEz9VtHs=p2P>Mgv%=wbMiSk4u9h#|0RVx zL8BM#_p?rk;N*&0hnrWxwe&2X)7`gr2p|XU1}`7jde+TWZ{8uQQ?DiM_$n}n>iU#2 z^RsCvMgTSQG(A=pQ12T%LB;4I5@6KO=MbDYDe5~$X|XAF?Ka4Rs}b}|pI#(<@f1n? z13uv8DVH})cQnmF1b_00`ID)w99+eKD&-ulfhYp777Rb7FmX1y;e4R`dLk>!)q@cl zuPq?S1;2TH!f5`hZ~ljR@Hyu)n3L-x`4i$54VbYfpu1pg5 z)R6>?e`KL&t`e!qJ*tIOlO1(8KU!70i(i-0*Hfo-~&s@LO%IAu=AI}zFVVrRom?YfoB>KF+^DebY9co_#|I( z7y=zYEYD5{RAbQjMKuNA<{_RGjr4bvE$>~R{GXS(q*`sy4I9(jpl<3u_SiCZXAN8{ z3cQ(!_T$sH0fYj{c(WWxO3#55oPHDhY13J;2-O8kRRUPX6laAedV;yz#YJYfp%2|I zvQE#&6_$t3h)^-ye#$$knT!+oH=g^zwWNtdyBt-{@b+$C+GEPs77<2RG(T}9T~-di zs+DbB7Yx*}w2SmXbIT0eQ{A1r8|De_Yutaq^+WgZgeOX7j_YuG1Guy(h1w(&-=lr` z45QG z<*64+9z}~u4XH8K4rE;rHHYXKR- zkU;a}q^SD1xS&p!D(;^xYb|(eJf7@gsOwjv;f$hf{>uvfGm9KBxP)*i7%S<_O+pX~ z=AEsZh>TJnh^n|jFFBr=U>&mK9nbVBQ}r;1uCJjC$&h5Tf$Nc)TfZf->d){0*P0Hg z>ivxZkhf8;6f<5f!)9eJEPU5RpPi$f8bm1*HN2$ZS|3T^n67V0kS_Fi1e^X~akqor zM7ToqveRh~F6K^!ZpMy~W*LaceRl}K_Kg?^x*;_^no#pw`a+eY!KseN^UCkRBMFPF zj0x}jstrXPPmPg3Z`d7Bjm3#!|jo` zS3@arIHQR8<|z``7CUFPp2V4c5sSUV2K4b0m_tsG#_hZ#AW@6j3?BXDV{;&`tPvTh znMuARhiW_dLkFr8@2AX_LU3SzyIZI+NFHq$4u}WSN0lC zbZ2ttgSIuPe^XvPw4{|c2|Sw#vluNI*%QB)Wp$8%L#rI%7B?V1OrKF*5#7FcDpXW5 zHFPKWSrTc!gRI&**!S1U>{Rfr=8cIPh*Gq7be+5(x7IG?-#=M&D%c>+nx|s2U!zjB zby4gxq^N~4Toz^$0nu5{C6vC7 zW=#HbrIfV#CD-yzwhi1=u=+i{S#rixqH&?*DbxJXCePgL}q7Kqn3c@e6`4()QGjKK(=EaH{dZ7kqa6X zDg)8mb8gHMe=lP0_)Nb8UO}gAlCoi`-42P$GJPeG3u4EZDti9xQB9C0c|R!%4v2Go z@%0*^aJ5mDAzWzGAVA+Si>eKY>AC`?1%n<8d1i_&LEBGzW2ulAtOOcmbU*j}=_+{H zvTknii@oF$^tf$p*ntUH2K{KI$)%`Up=LZ3>Hn!#J~CsA@H=toj#^m!tf+$d9m4;K zeaNHUWetud#&5jZ4U`;L2*igo?6TrIl*;-ic`!A`{rlfri_@|^Jj)&v^Mob&={K{p zrsp*|j72@=PjKzmN&%8w1{UN=`)R=HuSKh9V2Jr$qX@MVzPS}_(ASmR-ip}7`6Itx z3aQ^ab)X7EcF#ZFC{UySL;&(mhxOJ9KBVDtk%pgAdt56oiV6@N2aM-Tl0o7~#xB>h zq&m=E$t+7xHC3^x`C{q2{1vOuwvrp0-UeFwz0M?xb7UtK;tr%ezhdoxyXV^ z$xT&X%8>_A>Z7a*!p|OA7_T@~1l?egBB7eNhqeHZ$o0Z(O%NQ}xZ3J)!Qg$n&(b8L zL>hLnd4VR%SLNZVw@ubN&RkC4Wm~vc!=o5WWqGkk9|Z zs~^D$_lA7F<*#V-trBZ>uIHB_jgOUvU-X{8mD{QajDoN9PmhBED}R~)fIw2ZX8iy~ zaEk5H@0m8N0u8;@w)^+L^EQUHe#v&)gH3_NM#`O`C~K1Ihc-6Cb;^5 zr}*m)S><)0kWmyOsa?B&|LG_7Z)8v4kdLW#vSv}y2@1Jg!>nw`S|X(*()Xl;AC62th|BIE1Udi1uaANWeNt^Q5^Aa zS51R}L+1ey=KQ{rbxqplA^akblPzw0F`We76V3#GpLG!uPM zcUB7mESi6F|99~K0l^4s&QQ;X*V-$32zuQ3Y(F4bHBk>Vy?Tpr&@MXFqV~3N6ND>! 
z11625y1nW&)9R#T^IH9eFHjflUB?dwpR*@uxY+}gI%VYH!B0gAAY>s;6|i*Z7!1bp zB+xu3eCsHg0vvhylV6c+j;O0W!FzKTu0bxOkL&f|JQ}gOV_kZx4 zzMDWlowR6Xhvsn%l_lRM(Uy`zy^`yO4Pdyq-xFrw?YK?MnI|)r*7ExCv4sWVXqBeU zj{AAy6WH}N*^Q%Z@5x6WH5ggBdp&OweH>&b6h`_W1`o7Mfm6Q@t*~uus7qd}7s}Pdy%Yyh$zN;g z`5SDaUL;AT(`(BWU92~wFcDI#CimfQ{D!GTQw<+u+)@P8TBshWK7*rcs2Ki$K4Y1w zW(xB#i_mry0b|EJHhYfCC#>}=99aZx^;9^zaZ9QsR4Rv?^q$CMF?EWz?m(3#98Z@_ zv^GNj_V@CfBdd5zX#+p}VJ&|YMXR47ptsF-5;PE=UtWY&DSEPXW72-bHJ+XIAn^VB zcOfP4ns`IWP@)P!y>xza5q-}$mu3nafk8c`Y_27-y`oh?9BQgbjX0WrPxqqUNMVG5 z&eTg0F+cOh9D~P0@cWm%IE05xT+e=>PMiI2LGl$XK_=JzWELagJNxVq zO7M#bC$)}5puF~Lx7`Kgvl(iCaCBTx9BkIV_PPQkp>FO}&m0ptYU-1GQ^x`I&!_ts zr+7pcIXt>oY-(x028Phfel~mU4+Nb30i1i0TwO>*lyedW7Nx#MOm$2f_^`!4IxY4(zFyiy53J{HqvmQ!CZd23(F zVOsNA!W7hM?6E9^_VwZE3ZM%Tj@Vv85mm-FYc&?ULi3L0m?$yXFP!TleqMSuXNu^y*F~Dm}_I97N0>-iP z4fAq|&csnkTAitexcQ$#MMSnKP$D|oebTp*|MA! zei?W}OvEUOa{vCfeyg}))Hz5;7rGTygz9UIkU#(CuRri$Bm7Te#)xf`_5=JHb)kMl zc#VP=p%-mjzzuCGoyhnra`6J%e69Ysey`{jAhqy~G)U+(H`S=McUXt_MO2+1R?AqmH@MG5x39dj>u%eT5k+T7Fj z;g3(y$b<{gFT_ibfT!WCf;W?b9R5~%nC~*~iW}9s^mO$u2-ds|R%p>2_zVG490o8w zI1Kt^p6!I{J_*55Hh68$IfeJykMt_0`9$FF9#fU`)`Z|b!n|J2Q3%iN3l#Jk9n&E@ zkPU6e#07+}uD3>)D0Wr{gZrP@UA+Rpa@bwwuzx|K=XX4U<)N9u`W0Av>z_?Ejb9DcPY)^4?f7)zlTDaQmwy2!n;ALnqD^UOK2be2|%&Tj&o zQB6ZctpU&wd@PALEh6MOQ8@ySMJH$#!}a5*@1phs?#(!_PnNO2Tbseo)s>+H$@4vL z6|Dv9&%4b#m})=wBvz5!Fm5>km}ihA2nl7$R*<^p$)3z_G8-NL1CG87e&S9!qs+dy zmm1<+DlaHfSd;qQ9|R$hh};yX_B1#HtetEiV6y2T%NO{QvyV zZ<51Bw_lcMc3yYA9d|6;;sdP%{*WTj2b!V@_4qis;augu6#IK~Mn#K@kn{VBFI%eP z!@#j&X1JYSeivXM?<;OFzw(x#PzeA;#n1g~Tn^~UM7_Pq*I2UAJ&+@-9GChz(D=p; zZG2IGCAu3+kn0rzTVDdR3VZC!=>?AbIy%?qQ6y+M5aKd^>WpnP2Vd=opPnxV8#^Zm zN|ko(l5Zas{?S3*IK>Aqm` z3=-t*&AEU7z3#fJz*U(V63PhOUILcdnGeVi*$HTj!0RGtt=>gcu-r+NuA#pLXscd;zpcBlzBIU3B+OHKU(PTxC zdJ7PR=5NX~HRiI@7wSP|&&QTMA zT?y;oQrl|YbOG{;N77j|lmQlm1c6x*n8gS<*0YGz^Sfz}OJUm}7kve0WQx>F$!`AJ z5<458d!5ofRc^VD5}I4q%Nf`#TLq_KamYm<$Bf%F(jH_~-HaqR&zz)&S~p*M-IL`d zF8P`%LDP_Y)D9y_J)0^D<|&uDo&5-*(i;EhANen~ha#3O1XTQGO$bOYgNddQ(Phf; z%;2pc^RUXTKs?#5Hz?a>4X!cVtbHtG@tj6i!ELJl5vDBqeDv`n(+AHPLHpXhr;sa>yT=M}m%=Z?E;l%?>X`@`2{`EKO~ZZm(tf%tJrz4D*#7R%Ffq3cT? 
zjbKtZWVetvnO(H1Nr4?xOE^5Nf4%9xgu7TT9*{U=G?668+l%8iN8^qiz>HJL$s)x4 zzrO#w`u8+axv`Jt0X>&uFN+z@FPhJ<`+Dbv&V54UbW8ElUU{=H*Ad$4R0@;&7Za?m9u98^ zzxT`D(8V%ShzJ0BK<>SvYH7R};gAq>EpYj@h4mhuChAvszdi>-VGfy7%XeH~emhzZ z9s!TAN}7mW#Wu>E0dkBy`WdAYEk^K)`vX^o`FtF)VKMuG=SNYNd6QF;(v42)DO?|` zC!goM>Vi^p-ltT?8$wj=VZDr4EfI;xcO5rJO#uO(xZ_=;vM_5R|NaQ{o7I#JVEuwT zng|y(dM{QbXj$7b**4v`E)=6cKk*tA%5`W*OhxgAwE54l@m)^)+rRwg-^Y(B*Xz)Q zx>lOkHq<%P40N_Q9koOUM|y-^WRWu-`4GN*Vg)|61PeSiIcvAUg*)61MNKXzt9{_n zmM3*Wu(Ucx53+iHyEcxPoHF>c9syPw&MPy{$fX)W)AT;VlJloml+O3EkT|5j)tg_S zQr6BaUbY+{aNbdR+?Dgz--hq9eNUQMs6E6%&^toM;#%h%D@I%ISf=@;fKp9!RB#Cw zvtXlmp1p-4SWyy*lF7wYjXBM?P3o78)H!d>7X36rYg5EKLWlSTDbj)Wr(Ob%`d!lr z^(zwezz`c+K2)7z>F@ob7Nqy5ze-`1NPaz2nHld2?hGd9S5wlV1{&2rL;L`)k;?Ah zQUQN0Z(cQ>ih+nRs1Q?Z^7VD?1qwk8GQ8g$Ssx0ds&to?(%`9xUpz}X8u=@tzj=5$ z&gpC4?*QuNbPcY=O#!?Q**N!lWoxkpeTSV8$8u}Y z2#Ed6$9}^G1V(P}X8Nn>ABrO-^@j<0wre@n4~lC04sdeB<_MRAyeSE@_xq**FkS`d z%?AP)E^#mTkyZV}*2T8@(gfo?tP1A(ln|Grz^z9-c(p=U6+=r;i`!=f%~HExA1tMM z^c6t>!AY(ZH1X{q*xQ3mQiIdmB&*k);X2J!S9NMvn!abdbH?X&=PHaV6~06SwRCHA z*e>_-i+P76xn?O@wh+d@H+z7Xy(m)sQZ`6nGFg8=!ABHvpEsnUs+xc~5tnR>^pR=Z z$~&sGw`3$~A;p556yEQF=rfI?S4+bG{OFP2v4aFGKAD7U10pxFQi|!NPuIT*)3hoB zdOrMF$N00_y?lINo?5kdj&_v~w*F9{ity8{QC%^=n(FKEsJRRW-H7Vd_ZX|(@I_rY zA`uzh_Dz*N3H|cP?4oH4#BL2A`dAU*8cD%LcDdHRN#{V5@~LwN!0j$Gu6(2bb**fX z#XLb;ytRBo_K1fOu72b8j9u}^2~M7Hl+a^gs_Ojmb9sR$9PC+pem7o^^1w!jsmNTd z0Yfg#O+YwN$sL}?gPQQeCwpIbA34iXsAFz4L4M^Ex@EUPz3nA8Q*qd{T~Bz6qm`-? ztwxJn;K}?3@p3zMaGysx8*ZS{F756+btF#}U)NcxIZ(DXt_Xz0Wn1Dnb-QVJ>GDz^q$$!7QB;6R-{ib*NGP?H?pI&1Rpefu`B@y?tplhY7ZIYCPcl4DI$E; zhZ@HKNr*+)=bK*XWnA0OFu>Oo;)A#)F6ns*_th^ye)Q;5uMK7Mx?i?XS&!D+_%{LT zS_GXd`Og$W?eHN5Z!y}Bnx$r2bYf9jtvq6SmezIZkdiyNLzWSIPVAcTgD1P2Mep2M3 z9Te3Q%rsowFo--eMU(km6i&s2eRmnv6e+IY06F%}kqN`n);hwIBG?D&{{4Sd695*3 zCQ&a#WaM>OHQb9{ir;uuXe=tZ?VE?ReO1X$6Losq9`F12uln5LVCrjfivKfJb>$3) zez0+StC}S+oVfpi^FEKT&jxZ#EX_bQI2>Ry8+w|~Xa16Lq@^Gk9D2ws0WG>??3sYNx(&AQ64k${+rC|s z$!_ zUr$}C#l>pK#D(mS@VM)yuSyM?XHID2xDv!B99JI~XCDiEu9TW-ok$4TWXN5Ybija^ zXBi%l3!F{Q0~^_H=MkvkRE$5S-Za9dpp5aG$HxZ1ItjBs&mI>SI`?t1K0rpkf!5 zE70d+Eg(8U*Ij(?^`wM`9ND!jK9d{nZz(tdN0jNZzgcwSZue%~PRU)cpb(i@uQAjZ zk64e6Vv|02d5w@)I)PtxG@hy{qvbcP<*Q9wZun?TN{Yv4s7zL1iyp;fTP8BE5O^mD z32)g8qC~3OQG}qfCxOCR`0kPCNj5(^u`}3JDhw@|?HwUX&kO#&hD%MS*Pt{a)Ur)& zZBuOzxC*U`5++WaCArYTMnf>Z6M4-x@=W5_2|0rJ-1Ix4b%G=B7AM6~BO>At2d7@IJxD`~0ZByCRh5?w(4x<8utTCzy6@_9(pXs0rXF+lfP?LJ#?F3DI(R z$FO3{HqY>FFB*pW1s7oCAAL6JOL>W4u-AkEjw*Q1%&>Azb3|%?=IXX;)>2VUs@Iq) zAqM47>SDtJeL*HB+Fgfv!=4OW-k)HI;!g@&k76}Jp@Yyo3JOq*;N@-Io* zb5v2cW>9T-u|h$ANFBP(!z>LH(aB*ZHc>2pMS_T_r=-_gnKrM3r{w8SOG3Zy4nORg zWlZu|DC@F&-=?~U(^*x?JOF_)0liFc0TqLeUy$`r{iE7-WhjM2gNIZ~GyJNr7w9YP zmtJJp_n4yHwRu26O;V5=D>sOlKphot8rSiskpG4Y4; zluq*4frKSAziv}s_j{zg!4t}>HV<3`u;T?nK_UOz+o@o5CZ^;f%3+)36lKl|AVf$k zk9jSIvRQ@<{C0Os{T_~!dJoFeyw%hb*S^BokZMwo$#Zz_A&-_PahXpdM2_E!V^lv= zytzj|m>ipPur|?k!gB?T6LDgpDrbPT2e}hH=;sNKd`w9T1ENriW01Kt=r+ZqWq1Zk zAHaX_H?C0eUL+|T!E7&;gX;}J%|Y`>fgtD@HR1h;qV(@bX0^V6mk6Pt@JlhvM%)zY z&Ly42Ga^(&K)0TKeB^Vo&FgzVsNlt=?uj1(?x`RB-gB?`XraWxmaA{hU0hCy@0L4C zd0{4*9Zn;>H+{w7Uz>I*V2X_T6$3$&A@~8!$m3(5|FVkP$mXa z&O|{%gb!N4@bFIHT}#;b=92QtLH`$s`7_bq@7rDTxTy3%l%7(l4cOV*Gi+Oc_E6h_ z9KZApRrPcEp=YhGOJ{xbw+_PGLC-{B#pSW8f0TdW>K`=@Jl-fv80ziD74ERQM5xAA z3&c^2fUc_)%OT2bfYJ!_n(k?d668?7HZPM!b9Wwz*}ziS#5$22v-F~l><}`@OtyhH z5=xqrYGy%szX23flOwX-Wt5J!Nq+snldrkYQL9XGxDLf*m;kZacR1OGJFjSgJpzEt zqoTLXq@DRM5Bf*%)A5rXpE+U}M#NonHP!NU4`=oX+8pB2>m+HFrH2chbk3E%;t`G) zaUVUIx1VT#=h?C6r`byE)QU#k#&B9aMmwO1yN7*}9c4C!l`m?m6-VmbC|q~Sij!$r zd~~!7HGx_g((X#IoXu+_dF(w!M5=sD=%#E9Ad!iR}mvB<;aBu3${W1J1 
zW_v9@?MOVIZ8a(YosBi+$hwMKo6V)`Q5b87G;;L&bR*HaK!`ds(F#Q@X0&u(gGQ5JvlVzSs*KCjIG0fUs$U&NBVFU6@Ruh`SwO%4Z>uXk1Qcw8IK}I}>x6 zT*6@2YVRn1Q5z(WTX761WOBso8>*_cUjsOEczkdn^zl|L{kG$Yem<&qtX+^V=$;-b zmJWAQ=tu`5ctnlXLrs`}#fO>`%2$YNAqiD6#I8D07XH$kftyX~cy4L>Y8K44x58T0 z(D(0u^gAXPT9PUy`VUI|Ln>I(|%nPrrku)<)NZ4XtSBd$s1 zXtQL$i5pkZ)S9Slw)(g77wg}cxxe&IUZKEPJbf{}SoQ#W2dOVTSw=pqlKljus4)@U zVP387{b9m8?H620}8)a0a3)%DF#f)olCSb6qL(LK#_dztyobgPULFQ5L9jXe_oBzQNKJ=Gz z(YzzB>XFeig}gT#yzF{8-Z1GZ{`3U~p@+Oxax`+LcxrJS#vF3&SDkDiwC=0kKKjje ze23blF7l0^@+2<1;aQ=@skdVDhV>N}0Fw@n1|a^6cTuovm@e{%t=-BfQ!~|s@sfZf z_a~BJ*O0c`lu7kgZajTj!tNIz)FjBF4j0`%$Q&P`6I^N8{bZFrg0S`;;BHgtT(3~3 zTnO8bKK4X*5xuhqtB_vFRjKu{Woli~&I(A4Y&+QF4 zrE%_7c{cJJ=bjOgQ~cgs^7z3_EQ%4%4+1EJ2G*%24s6B=mAW4K@}6A8DW(=N;y`6Y zYj!ZcL~6Z+Q1%;3Ttt=754KZAdN{UX5tFN<{)zJG$?Tu*das^PMF)-8D&iJ81Ev8~ zG7=Ynn{r zd&(RF6rc`e5L}u@#>toShW#nWu4YZnqu)p(3Yb^#CLso_aZg+z7DqW~1VuEZ<|lr! zL`FK{F<61yf?-rFN3z>weMpub#=Iv&XdO&o4UJ;AtgV0O*-_KqnC$q(Y|zig+xcV~ zDW4oXIbGD6y^i(MZ9luc3?=)yvIpVNTA|=`(ENgLeK&8@)cV>V3a!QYf;NGp9-U@EvqoN(16FP{9p0rbc z-=}GP5s`bX-XqKTtd-w)w1_Jr31c1?(?JYBTQ=f*;^SA+SMllSF+yHOOSGHCfi;txh{$z7oeF; ze2PF{xAbKPJ-uo z&w$+2s!-J=i0h-UjMNS&lHy%!Il~p>sXrOrFuYFybJH;|o!N_6^}B?s{ylDG#Bfn? z_n_#FhGri7Ohzm>u75U};wTNR;5+snR3prjbJ`wwg&_x57$sV0yiBGv9XUDt4oV1dTicAkX>-_Z9!+ z%ANOHh{LQ^?m+*cmPET2UXuM=vp<`KFMT+6V2C2V{Mt8tSVve>tsqi$+F^r$Z1*gm z&aWaoC5efRYB|KykX3~i%@8A~N@#&s)sa++V8lWx%ol|!Fg_wccC0KPVMpq>^1b8P z^2ZHjT-16Gwm$dK0%?!y%!hA8;c@5qw`huId0pB3Cvw+5WOv$lXnyP=WrAW5_mDb} ztlk{MEebRaj`5!aiC2Dl2vsQ%6c0#f<{>tMe=~Y9qNT*2aQy@2W8`9R;fPPzoLZZm)vPXzYrOeTXj@g;xL zE_!NTf_1TET%1k4u5&nkxGF=uQHdl>RBEe4YNkxE*lA$=pdr|`+L`A^p)5YdMlEu! zxVJV}B|q!m;Cx!G`a0?o0>{b>$1jh3nW>!u%8QplDFWq%EN&tsg?SNIz~ou&EZ+My zb(g&a0lR!LTQL_v(;)iwrl5tJS}4W$iI4ta4#J>M=Kt1ApiPvF3lp@@xinD z0$2h(XQ&mg9clYqQdEs{U7EEFq$O!CL5WiN8Q9$?2upc+n zp?e{}E=((2 zhuILL{s!i+-4iMQ0OV|4QP>&CM(NtZT-|4ytq3DtQQv_wQF?21R-H+BFEmCAr)mC3t^dSc#E_>Kcn3)wi{3VQ(M?mU0{^y!udA_d3q0Jz_0Oeu2!Zi)s9 zjR`Ee_HZ!K7G-YC6PPEn(e1>KAF_6H$O^}qp}1*cA{J+h?$JWrWFFN^-^|I@MHy+Y z&GhBc;{yFbp+g>rIY%28$`V<;9C5-`m$;~fr#DK0a5Lcm>>GeI6@YR+e|v%``+evx zbBvDgwbRO=Xr?569QD!Z>UM~YHTCbKPk-8%CXnM_Ev>c5*1cKvJNAY>f4o%_M{^97 z>?x{3ujvg|yjOfnD+r9hs7NYIK(G^8cAoSo_}yQ!ZHuV*C8)mzH)1s_aZ#^h8ivl{ zT7!XAv16&WCZB@Kya4=qEl_U!7y{b&R_OXl{MQ88=b;{IrL4t*FgjbHsz`jjV>1<9?AQA)m;Fai4-R>68t;}+LKUa!MCQ(qv<{eue<0JFt87qFaaenR)j zn)T zUSQ_LF+IiHDryjQ{lxbIs|O~DB=FiWxzUVTnjfb9_2hW}sdsSIp-lH&P32;RfUS=R z2*Jy|ryGEV>Lh;;@UEv)>z}f=+BZ2n%oW6PWQj(^a$k@H_#~DJ_;(r5bo3(C*iWllrlT~e5bz4V zn;BH_8n2W0fk+)4XE5hImwjS5SU+J)3s}Xq?3kcXH=dY|(`AHiDUuaN7(IpLf=9>} zryL%eBEbD?4+Jp=LgmR)&H*j2nUUDeoYfTfNw>4bwrbW`#Kq8U+ea<9dyH>Ko&68J zemu;~mic-8PjEO~VD9)Y$YgamYgV7^)tqsk!j54jZ{R_#bAPn6Q~x~7_HtC4n7uGy zNEj~@6YYu`7begj|MB;0Rrl}z=#OgkAd85h`myur*EOdhnlouhHq6Tw02^Das4o#j zHMjsUHQU9jr71+jWR*-@uO2B+?F@RiY8+NGnaJ9%nZVI6*Nkrn+tOpb%?YWBEz%Bd z=5fn@-fjR?@MuK>`(G1_A;BZ&N}kR;;bZ+L*hEb@Q=T$om4~jCZyfJzo9PH6B<4i* zt^FnH*SckeIHVr19~k8aj2rmqXOL zQ5cC#mTlZ_u|$3OjrD?TSIn6?4vuvbB*mD1P`!){sO z1O6k=V@Ms}Nvn;UL2VO<%HY$;=kly`euXh-^1_y|z%WKZHssP%#52wcJm ztvKr&qBU9OoP~3p;c4|$XseS>;asDB z)%!*d;aWPFBuHf1atireqvFWwcE`}XPYL&3NPIwHq)fJK2DN5+Ioz3K&lcrnks)iCf!CyA zbCd&hiworHs;^X(>vm~;`#0bG-yW90Ma>&j10Fqp+6Fx32g$FuM+F6x>k9XAb~(Jm zVa=`KpB%gizrwBJbHkTaJA-N*MY7RQTen;xaecY3B1W;P)Q@j126wVY%+|Xqb9)(k ze1h-}Mo}h)C}zRLVN+WJ^t58$AAM-HqV9NfzodI~RK;QFvrtB!GQ`L~GBGiNLXF#8 zMt}y_#CN)%jN;mTF5t_sLm7GgA_5&-)W0jvWKzmetJ&{I=w^8Z`q?Z?+oB8f3p4qi zaFARg-aVj1{;)j_|L`rE*2+B?#1jgKYiP}TLz-Upp%^u>5li!M5QvjT(x7)+1`i!| zEcGS_&%6z2sW7u$LNbM`yXAXf0V$puq;A-`VC 
z7ndZV%$m#n`{z_aFLnT)vEDF=$-Ra(d+HLv)9%xF|9{^9D-ZO9%yaV#Cs3Q~Y14)n zJw0{ZE$*AN;iWFHiEBG0=rK7>H;K9*9)cmfYTk$f`7&z7+~vzohd}1U>d!|Z;`$x9 zSOo|CyW?pPdZYp|A%Fcv*1`ZoMK25yHv{sN1xrLS7o`gPMy?O1NEpT&Q!7C zu_gb*FFi_CyvY-hSms;WGVu?t}**r=)a;*|5$OZut z2&)p;CKob8frfhRNksR%=xNOmV-z2R$#{%N3cH(t&($mdvbzaNVj`okr0J}iC%cHN zeyq{a>a$L<{}I2jO3=)aY<|W^*S7fdma`%B>b5| z`h=b|i82L@l+Cdwr#J~CpFlMS8qfQOf9UObqEpoe^6*J5@#^K>xfg+*H-u2DGKkJN zi`R=cYd%aWM+E|xur3BcM*SebcO4bV;l)@sgCQywy_FyQ$q)a<2eI>`>Pwf+f)?J> zCj@tE5g5?zx?~bgsX|tn1Yl^n8;Z*)49wz+As^4%5%KJxfq)>mJ6r!23owhga*hTP z+(+*-Vl|TiIjwo&ifFy%Vy>>Co((YXHC>i&KMw_fO`GEx3YlLpBrUp^asEQId-sU^ zP+S!T_FE1{K60)3{{0o{J2n2WT-F+|5k3x4fRcuLE)Eu#qzZO(UTT4ukvrzzm1s#N z73GFI&DG&mQ@UvmRIkQSbwN*hgut37NV)T^xN08iS3WrT+p~#}m8f=`omec(KouQW zb$b8%heZeFrGQa3k*GJ@=XNrB3zjTJrMiv8F^)7%~eJDK5P zs`qgo7{E(HDOXl{A3@gNj6S=&3BS`ZGFHE>g@BN!5niwN+xOr6rN81@weOu!gM%~F z{;GW-DVCV)s!9hsc(Y+*nhqy5=!>Zi54cWbdQ^X>`g2~wz8vH~B z?u$AkSa3{={I2_s6_u9WwMCz7~cqm1l!XA8rhV9$rCq;0>3FbP4}zkBc79Lj>tZ)#V4B!&F!B+P|Vj^vK3G zF-lz8{NMMFsgD=>A+ZF0Y@E~KbQwcqXeg2w`sSffF2&ojrfVLU1Aq0+f6)IHIXPem zhRkbJePreO)?`*`6uy-yTR^a~<CT_612%gGQa1A4y*BE>m&Jx4} z`iOOqJ3KGJkkKaUHQ{P(Ii+*EX5}hCHN4(6 z8?4+R>x-6vE43-8XGLjzx}jJ;TUqla-296ChzwhCSBR1|-8p#LHXwN$%4EG;`ZNg5 z5McPK)vLEQiGULh3)5A)PNdONoCXGI7aUw(FD1X`RXE0|271{w&#qJu#q&ojLfyVB z?<-Ki=78Nz>poHMDVWamt1fu=C+NcRtaqXgQ)6R(epS>~bFq_{;e_* z|9Z=wUtP>!HG`oQ%=;+3o)aV=c-z_s{J$5m4_BP3W(k-OajogE{dmodpDq zvwC7Zauqs4Xr&y$e72Scp#IZOKCzy0Mw;TRfw$gr|Ni4gA3rjICC1m>6;vEZo#})#(p#mx$Ic96-_RA? zUn^U&EgKa#i?g)BRoLr9IFkVH<)E%sF@H~M_V#=lI6r0nw^W*9-ueu!q~vwTTznZg zmOR!Lu4WoenQ0`o0kn8ZsPU*zCK2bdp=^}IvN+s1_Ct46aw%r#LS4M^8y{n(pjj9B zx3kdWIblXQEaGoOtNMeNr-JOR;=?5$~@aCR$fG5juToOpjn!s_sO!%G>liM z3O<6rCw#5nc}#Av=Rc90O#IeACx#Vgklk$58n#7IVT6Wb0t?#Dt{=?Ny)vaNKt)H# z5E*a%7B14vq$dS@+VcLxANoR3JmKZO*9q z5FQyt_&osFt`-{HJH$|ZOe7VBMf6_XEn8FFIBEb9<@CG-#AI#)0{JDlnDbzo6O1R* z0wqZL(6XQdWN+060D)!vNZY^rA2#k+70sQ}+yzZJ+EQnU7cQ#YOY zt1ER4(AwXcR6wXzv9f-KW#M|)zw=u^@&@o+<9!tZ>;Mpsca{^KnBXU>uc+>|#j&<~ z-0~i91B1^G;Ei83vaZwdyT4X*;$F_79_+DYXwifxM?d=1gQ!yS|C07@&yi-?b)WwQ z`^k@Di;S73=F;fP44c%9nYdIHx^NpLs*26w1eE|1Kvn@r41g?Dm1&1fgOo`p=7BjE6zECy1SIfPKTd_`?Iv?IJPSj6&=&V*dS>3iG91=ixRQW9JU40qp zUuBlTq;35ny@030wEc>$85U_u?;RQm_|M!^#B$U!fMX-XfP(b{X(!YR#2A)PG`Y%D zR5SLM?%^R1wshT9OIq5^gD!dbKMc&IEQXJgA{fb;5l`U@*7UwB=6R5QioR8WW9z>=r$dtW~jg4^9ihV{e%sx$O4vG7E5b>Hx zcYajpoK}(`*BxiD=CS$~;ThoND;DAd2ag!2O+06X@`z;l)QI~C>Fi%@4Nb6W zW$MOR|2{y^``I$+{L`3{KJS2@mI-c@ZND7r8e-_)Bj}LUoS?|C zq2XEe!<^yJ9>{DzJN5pG%TQ&2~F}?JSxGVKTg`jX_@t|Dydz zAAir!AN5J!YEfRlf5U&ZMtOfda0}WfXt{^ujjPj?fM(^Fzf1^MQ$!U4n!acp%f`NE=E7R5`>~C#6=Hk zBVkl&+Rb2$72mEAZTijKRq@vSjk{^@8btah5RK}0lckF-n6F4@5sqF?1l;0Bh8tu@ z+3m^8A}V&vd2h3XeKzm+VBO6uHg^upg#`_^hlTK6@8jf8JCsR_4D0idaAB&u)k&W( zgy+|kr`Hd~Pr}l`g6{(S_kZ6XQ`__~UbY6DsMp$RmT{s}dXhEncN8?hk_zTW%S?JX zC`V&GuM?s-9M{K#pf9Iy{0+v|EPEcS^H2ash>*@;YFt|y<3-QuA|+Q#0~!|F*wKg)*nN6riF` zA!qHD7Zjp|q%>@qA;Wc*X7oJf{d+a(jClAV$$1R?h`eB#Uxfh+3|P|-2X^u5b`Kwr zqP|>fFkfwD4SYy3{CI&MQnH?Lk7g!i_rlQODui_^X9mQR3!1Q0vIzpC<9pDLQ=E3d z5mEX!6B{GS>y;;l*O%y~p`YTC4T-M2=xQ3gS%-m_mr(mdvvfv4A2|#ROv@Pr&22lT zVE;U1)#jS@C%zY6wxxTEL$JnT(~}f!667Tx_=m{+Z_>(^a2O&4Ye>XqeS z2H|LkkpnP>R+e21fzn#^Ko+pMdgG`CJj^}ZkDDnF^t^pay?afqk7}5+tOQ3qF4W0{ zM&}s9;XDHBJ0@Hf7iepv2zEfnh#>z9FkxY)aqZwky|L6l>|Rz7qs^tU?LqrFo391Ztxk z!7SP;a=!_dbe0r~vkS%zwZHcFh@yBslS8R!m$U8eubao1K$-OlbY3Rk)gQ}Zmv9af za$$b3Kt(`%B1em<@?kGXoJL$ot68;nJoWh<&2JjiBiIEca@T!%8j%$pH8Jn7{{pc!d5H`!8~+NF{?lSOm)rofY^4 zU(v7nf@EUXTWTLd%Auqh2efJ~Cv>244I}JE2ho`YGZRbD>sP1xB0gcMS|5;D1L&|~ zprGG?wv+pmSGwK$jBnD{Zu07c+hsM;>%=;rNgJ6(xUC7#e@(C4S`PQht_N6Q$LZ$F 
z0M*+$v%GU*#0T-YWAPNsKm&6Beb+fgT`4~w=ZJ52G4x~_&d?D3@$t6Eq@(^gaBHR) zP9p)HFs{`2p$-p`6Rsis|FDkhcof94JFR#Zr3EQ`hkA~p62Qs~BdBV9DeiSZB+yIV zCQcAxlS98~e<}z=1SB>w4M_ynk_Akzi=$rNimqA8D9SUVAEJKiwXX3ta^3wzxi*>l zdYRt&Mr-d>yTW`r_=7qu+8NcGvC^;fMAMnuucE{3+fCKxb);bTcfKd=)?&Lb8$mEVFqpyR&ih{zCV@oW)r_%_O`->>`kILlqo64=RAznUEfEw85xgy_;n_wB;V z8mcoJw5@mm>PJL(as<9+C~`Ad$Od&m+0ajaU4IjrCG$quCWO7cMFoyLN`S;neUV(@ zlg+Vt7+VEKja1>z#*12^a4_b|Hg*mRLG&(WLcjQ(PbSwmmi1+6eZOYAL1)5lrotra zcSbYe@xqYUZ9#WG0Q*nDOtbV}wrf{^l8mXg?!2tiEbxTdng~4M6h?Oj8Sxlt;l{jO z0`)!;o++|)+zI&0Bt0aj@1=5(Qyyl9mn3hE)%*>-qR#Lipn}rqm6zE=;+qHEdfz=k zjF}cMr+0NCclOUgqI{m{4^m&0GuQbr{Ap(IlA>zJzaTUnQW zFtu%DF`ke=szjofC4*&}gDT;|?8DGnOJQc15w~?`fuYnBvH!@n)K3ja`gt+6x_c-d z=5G^;e6!q7Mfx!Ov=cQETh9Dw@Vr+H!)fM_UpU1bFnLG~$=H{@SEr2myJFyJQLB?GZOJqJMVzY>Uws!tq0;{BPn!TLsaWk8$fk()KK z$1DMKS#6LGFugL0Bz_MY+`s2y)Q7mwKlc?L;A23@O^!#INzx4Q_}fONC%FF9I!U{i zAl}*NrGigbsJBXLDdHk8y3I*ude$KxU+vj3p!*+e4YOf*Sk4i-%Pyc>@UH);Ld~uc zjx#=s|3>a3ma{fbL?7BoCGoWhdJLJu5^*RmQ|Ba`r(zjR-_&gaxQ5uY!Aelf-ho;+ zSuP7N+-qOI{pa`pp$(Ba7K+!#lr_1+R3izw%h3^g70p4mn_WU;d`% zmFZ2b2bhnrMt!9Ts2|r+(C9JtE7*#}l?e{a@8(s9SD)~oKMqC!0LoGiMn$+UgPfZ_ z5#UyaQ;=MoPG5!8d+0@U6d2Q&RMrQ1 zBnYPoCoaVH)VqpMAD$#c6@7|w46dmY(dZN(UjzSGJ%DS+qp{~88=D^H*yN^ilK~;O zNXmA69UJj{3%dT-`&XYOr&5KeYLl>v#fk#0WSHe)3zE0BcFwn>fVf}b;}K&5t$Z39 zdvE92(5`G7pWNeV3mKF&XeiF&29@}`B|l9Z z{PM{O*Go{EQ1hCvpr&T^b>%{x*H?EL(@QwV9&H8rQRE?FXUdVuLQ~e`z-U58|CPQg zOG+A}5#u-8KhNoaHB4^xi!UZ`W?hd@;Ir~{k2*x2@Vh@hIwt|`m+lZa%S(0ztU^_T z^#yc`;u7_vq$OTyQCBWX=Mr#V-JT-Wh0k9fqaf1EO5X3QA^{X79pJ;G<3hrjl;HMK zRaT#o*7erIqTYc!H4j;&6efB0q_3=?X$+zpdi)SBCFWinw~9LTUZ)u2al_SyG<_Fg z%XmY5mo5Y!nW3ciX`Y5;_4DxX+@TX!t&?t!XIC^K|BX>lr}%u|k5t%)fCSwP>a)Z< zu%b=B2QZd@!5h^HH=|8NJqjo5&kcFgF7yF;WhKf8>)UMGMx55BRZ;BK{2`jP{(`;= zljz5vd{SF_9T6#-vqlH~6XtS+Z+Jxe9Qfa>lpnQJT1P4Z{`LDg^Qf;OW~`iJaR_PI z98lW8p60GGN8?NzE3uU|Agh`fjvb8-LwvYZmR0MsS2}J?Gj9NI3d=r1VXeH^hBJLc zi+t#U5HDjvc1osP&AXZ*EI7?PGPVdFch*^;*AhWZ$KWVJKVQ6|n{GXKya?y`pVfXN~bIHSjY z1rr$qcu)L5=8ymUoGv=W1325+hJL?+)#|CQAZ0LwD)i!}JqOTP4^=M9oUS;kzsC_^ z3fMU!_B*|#46NSr?f-KBw`P(PFaZ?OWzRNET7_+W?~idGhQTxDq`eOtU8*=C12L?D z5R@NM(~7e;X>`~-$I^b^$9afJ8hWq|(`7})CzNh^A-rb+y}e(mAQ8nm-E_(f7Aa|4 zYAxB-glC9sT3ylZdP!;!XDqUznxhe-_`J1`3!i

}eg4I-X7M{L==K+^??=p9BEB zZlI+SFa3cu{KBgDMk!7~F30?lJsMjO%kUrz6C(3(rW!>6CDd|4L)xXipKj;<$n zoy8fQv*&T0v^Y>+z%25TmI$HQ2)n=Pu#LT)`rOTee16_V=8H$bXsDhF;|+@xsB>IG zX52G6v*?8o3ZXN?(tln1z>-*Wb&B;7g8DcGB#D!TMlfrr4R5U%mu5l39zT?h&-KGL z{MrXMjP}2-MQI#cskWmlYYna+nIa8XTrzSW>ZCFnqlT55N>&F**K4z7ljtEO1zSCj zU^i$*8MTS4`sq)9YUbGntLCgBVQ9;qp$`j}*Th&23SYl%ZKX_`)*RnI-r%G5SmpE0 z_05s50>>ygcYSyU3L{uhWO0M+oz*pe5rgHBX$fA@<@`9M83>jr&X%d zhF(7Urcc$IzpUTm+0(G32&4w{7&eyaRqW4Od?cF0C78X}3VFTo6J);&{PePnfi0u40tdb3P7oKFL-+Sa{e|{ z?a`6J5@2omg(53OquEfNEOm&o)GEniEZM?CDO?c~v^~KKzgL4K6htgu#7ON2q>wCc z>%94pDKh;e>Cz-n5|KGGe%IDy5f%e;OVz>MM@KZJCW`o>6gJ@rc{e+g*Zv0aecBnQ zg>n~~s~h|E;m5RWyYk-Fq;)x`Ix-D~W4(SAhCk`S{F&rXm+AYv^;~jH^Byk(X4>_* zC{jeZBbZzt7HKJ6ElR1fz`4DXC5YeTlq7r4%_(H_&Yjjp<$Vao7h%S-Y&0jPj_foUU7s z9}uBij%xyZz=bRQ`6_TUL|hTSrc_I6`^TFzN(U{wb^VFSIbVWuiYrc%kd%kNP2;{) zd+M(1XJJ`Vn7Wj(<5%4>g6krF&0ppx*80G32nJ(Qpm$1Mk8i_82K?QVTyphu4l&_O zW?(mCu6PbZ)~)CsE#-i=rWg*zItP zTmN@dt-^Iz5q*7Td{V478vSBD%cJ2oBBRmm-%khKs<5|yn|#>kr8hZ zZBhHfs$8#s&-t&82=dy4pIrq6Q#w328v39a%x6OlYw+yiJY9fqe_cyevHx59o1k)f zo1K%$!W_-61u3aDVz5L{3qX+Muj=JwOla4o^63!sH2=n)s%^bwZcZQz)yf4$oiJHq zg~&P}_4QPM!E-_Bu&C=PsG+Ma8!^w$!Cm-oE1!H(m*ky$sj6k|B+R^)$4=7&Q zBlQ5D<(=>D)fv)FYN`5(@R!W8bd`lyxd@|C(hgoqbmlOPl5f>xH5%=k|70aIz|(W6 zg`TNAadu1QO;K@0*E(%MpB`CU-7x$~|TKcHX`>l`S zzQNain#7w%=?3q?-96t(UEFP_D=;{##^%2E@Gn=z#xf?!Jn{@HD=SN1!>_N^kO=@G zX)f5j2`1*MgBR=o=QWQI#&jRoVSoC?7j-#=-ncyKzTY-f@rzq$`p^ScQ}tE%-+#GK z<8R9%21ChCt(6MkaC7G7?#=FK@?E6!>9?RbzX@S-OtB=vf6E)u2YKT!G3EYJ_`BK& zC3180V=bG6UUnOr0KFTs)i#b9d0=bwbNqabFC9w7GSzS&gw*G7N-Lo=#sugHp% z3F%SF=k7;*)}0zKo#EIj$K7hDyzUEPOC~)P_-&4ufAjpIEWzK{ro*U723W+q94aU~ z2h%>)eYOFJb#%cW@n5h+t8I{FU}~!sYcB`|6zKzfsZPJasj%2Zg25svU8|(R##Y^) z41Vu#Bz`0YR~Km8AY3H~l_5WmZ1r{@qZ~}j5a{!_Hb{o1vB({+Ly=!dhUmA0IWoW*ynkUp^KJo6uv8=kgT(N| z5Dn%AYE5xC>hM-R`P>hs56#=2+@}yC$bo;32@>IxBiOzhi6oCenHR&ae}H*VM>;m= z(`f+3WwAHb?Z?oK55cwS>>H?3V57^eyRiYT1EMW`3T1p^?a=hdr z2Y*hefr{cH%WN{Qe?HFH9_mKYV~8ZgdWwpmL^np1mk34Bswgshcn9Vr^UPG&;)C_X z!YlHP-OC|8p}9S^82UF9m5E}r)B0(K)9#?i?DQf8&W@cuu)#Q9^`-mw%%J{$fA*Ws z6~}3)51Pu~xuc}g0<5XUJOuceQ;Ef3a=s{Dq|?81Hfmpb)N?(|MCk@Kp$_;vN|5gW zJ-CRF`+z!(2*xT-d^fHclN{n_M8qm_qr_yVlf_Yd8N<)kKKda5P!D6xEqEUWb`gsA zt)WL}R{XAf`QtB59)gGjyCQHu$hveW57aFv4BO`!-{fX3E@F5Eo`qaJ+f=6sU?coN zOnMp(vZusT6x>E9-E=SV2jBUQmrohQSvT~_*7ULMTO$h%{7%}pdRts7$QDel9&Gq7 zB}>7)eN<--!f54Fvk!Q`&-~yUg3cMqL^W{Gw!t1DOZ;8pG+kBV2+n&cHhhhb==CwO zT^sfiMz@jBc|xw!55-XqlTX(7yx5OlsdQB6{JeGCB<(z44 zXMX#Zj$rj@^_aW901aA?o?pJRzEqi%yz6?IaOWXwo7!UVLzj92V1!|V)Byb(mg1wl zGa4GwSB2NhDaU)<>yC;elk0jKN=5}K*_H~ECV`rJ_@qQ^&1e@3^%$*}V2w|XOgR#C zLs4;2f+{FeX$@z1F^gxZxkpw4k0~AY!=z3?iCP`YTtAPBHv+6Gy@a-=wv6s0dT-Q8 zfmd2w#rUrsGs^rt1S5defL(3xd&5uY+AxVA_m(P7YJY$;>!0h>*Cw$}HP%2iTC_rO=j~6) z7(`t6;JSAJA}GB$^a<{_4vk^GK$)x{CF-iyS>bF5wdpqtC#<{R(;>c^LCrI;iCk3j z@BI>!mk6<<0e-3T37m5GyZPQcXy?=0cr$Aq70zP%!~(F*(BtN2fK@(3PCC&J2(c1pvgqpye?IWbugIH`wuE z)e^B_v=}yeapK|p*61R!o{;^`IW^re?87RRiB6ySdcf}XhN(bt;7Px;D$0@8EQ+0g z-|JB~+;}s+%-rjqe?ILY5(T8PbN@@sGP$6&G|p$)s1qX6k0m3=@p*NtYN%~uwdNOT zBU1FkSSRVsi2s$x`8SgddH|dyuA0{_)Z<{ z=&C2uAL}amW~!f)y2*KgL?c^iQ>Ld8u1ek|B>rhQ`FusWU7O%ECd`2tLy?BE1MPQd4Kaa{bi-)pldyOPQ^(*h|SQus`v>lPfE-d1%D+8 z>AyZ9tUv3WV(?x%#rCGxbkYu%pqqxRr#AGfUeAEBi5+B4X8`L${-Q8xCOe~8y~^l# z5rAWt6vvv<7T6B;ZFp#!)#g$&Ae^|Meg+vbh+;-#6&QnlDH~Vxw;qWvW9@SZDZ9C; zV1_vh&pw@T#(whF+V>j3{v!K;(yG5)EJcuZJi=(6E}!4KOOjOh^t#_b zcg>4xE>Gc~e}Qz7+AmsQ*mSv0oEu4$VV;_lTU@?e=FiPSB%#`QnRPcN2<^^t z2r6u*BJsieB4|keL*blY$t1`KGNYFFF$?_VKk zyygL&a2(l_)|&()hJ0f2%7#c6a$ROHc=Y19J-H6`gQC;@2O?1Z;r{(+Uqsu0W)tLJ zVTIZ#hD%5KZ}cgI@lE13=ra#BrQ4tup8MyMd~XjK@syJ=Wc4~z;MhkBs=ZC{on8Z 
z(#swrRF++wed^4}Y?~w#0y)KyhnTGvnGcOw{EPiob`pl|%`i5Y5*o|AGg27oh;p(z zf+chV!Bnb8V&g-#%H*pR$b}^-+{9QvX-y3pm2>N1}mjH%)09fEdF6s7=z9 zvAbt4Q;Ogng+IV%8=G}bLU7`infMK5AW)0#fE?ODK5!zeSbTw5Dy&14sI_1KY!)}5 z*@)7_3YaRZKxqa!S-=I!l<^<=|eQQZyhf&+M!l_iQ(`dW?0#0HP0$lK0tU_Ndl4b;v<7b?-NwiKJGA-@i z_>qsYqqbnySQk#YfV;(REElay_{-J#($7G{1|t zF+*LID1IB&up|+>H9@DYx?ngVwPuN;jBe9-D@XULom)D*gR$>rzNU|2b}J?Jm=mpp zQ6<)AJ0CMr9+pWeUB`@i9#XvW%gDp3hMkWW)4{mO;otrE_i7u87Ubyk?X7Wr>1KJ- z1Gdf$ce&p7(SK)xX?jjvLp7qFbfh~MDj@$4-6HGt_`C2%$!|_x(#D(YLqGb_pDwxM z^3`j(T3uMZz_FIvUnv6rrlPlr@GcC3GvZpgxQru}qh6BW~?8K|Y+ zM(lgv`A+RphGo_cX?{)sZMYkZqVT^~CK0)&q{R{Bc(grrVH0a|M`}H#wG)@@( z_zuf&DP`SYdp*bjG?Rdnsd+#y-_O5!OPfKW8~)853AHSlM{M?l;6LHNy3VuHn8XFZ z5uP#8UaEsHiM^Yp{1&|jOX3NwTmNQ2ZqN*TAZfqiYH3h%O{UP=Ad+f7n>W{B$d~`T zbSZ$USU{^njP1_D>>d2kmVGdde@4Ad_6a$@gJ@REd-#A@!*U$V6YI0sD@;1?&AHD* zlMWg^0~Zf)QJz(leKsD|_VL^DENH6LDT8`tRKr<)i5J#CcgGvv3Mocd-9LlsBst@s zxh}wYikxbf@o8^KL+Qw*!$Ihe`B(LZLl4?)g+#YT%XIQO{;C?xpccL@;IAB%q5f82`--)=3zF1z3HfLjfgm0K z`u^Yfq{v*UQA(Zu)m3;?SEPIELAJNgO}_AbF1>uP`4qXD;XLM!hf(4(6#r~`3v2=s%=?9N=u-g0uMiqe<3PCfCn+LV#T zjtVMAa@-Z_;__8z7aS%{Z7tH!06oGF0JQ(kgt}7#BqBLz7YrtX(c+K%$nQLia5twkgj*Ajb48VGvIIe`*;~`GKa*FLFjuVe~IE&_x=p628 zoMdZj!(uJDH{N!$oVl=({ozwfyJ`uh$j{R9L?&0Q1Am5}io|D2tpGQ1h|Rt`v&)w; zQWfh^GrBN|>rn(XFguH|W7nQ9NTjPF7LW0!zIEot|~j@QcgH;yXp`UF7qVn>(o#r8-&ZWE!qz?i8#dO%Y#`k>A}34TMx4# zJIRo#Ax`KUM^MTurj8__B9gzX*IRg~^58lZd)~nSI7~&@C3NqQF{@9ik_cnDK8Y_` zjI$H)4NRwWXr8WGa4N1qp&ZmDdRG)++~m`}RYJnus3bWN z;WFtyO@tyRQUoWyEdS}a-?H(dpFalnl1oF_b8!(yC{z@n#;Q#niD->RpeT;J#SS2B zEnW6bk?x@!yZ`CNSoq)LzuFV93IbR~V0gH`fK?cpm5}FRXnNfRK7msar?`n)@mV*u z?%(gaxzf;XtzJQJ*}Ed*u5RB#c-DJ0)2yDU8Aidbk31<%-2teBj65b_8raI0{(3?2 zdW^GYrblZ5pc-j^>PP~$*F26kih3$cC0!C9lvfZ1WNJ-s+iZ4MOXlFRYv|Jq+0uR> z{K%oiozwGG!3MYl4-98`9s3G@74cTxhx35&8yr%9*<2s{J6E%J@1l{y@$_)fqi}?F z5Xu!NcpW!`bO&*Wdf;J};eyTNh7`}JiF8I&wvX1}=lZLvs8d`qsW)!W&8t}iG>QIB z`pHXj`0g-(RCmY3!LpPQ1;J{Gz>xiUdS#;HI?P3Zq$uVV?ijpDc*Z;(;ork)schzBF%H$B zF}n9DGmL|MkIF8rRpDPll-fK$y^UTHn71mag<*2pq^taGGTGfBU{soyMG_0rU4)Xm zI(thk4z6n5`|TJ0xX?Wqv7xtGIlu?AI0idpNeW0b)$vlK0J``rG{DZ}%Dmp3l9n8a z4O4^UuhZBU2#j~PM$t>r^#nhw7+T>=(*5br$rz)n0Bfr+U>2#_AKoc$Z z^P{744q>ESS7bMC-V(w-56tWlY7=04y9VC(=!K2c7N=j104TOjyzM&ngsZsdJ_WJ+ z|E=~%Ltg&oM~@%XPM8bjUtdN~_ANn;d{S4_Hde>Pyt(a?f^$5dc0*i0Wp9wNym;la zLNL)eL8hF~Z54mJ+bj3)fA`~hvjRg);z7+yBSzMc5KF9lKx>=YlERaI$rI#T=&#F# z6Pob6j=Z)9aN{z**dWK|@6AT|`s|n~3=uQKIH8AxPflH5C)$O-ujE>1Avhk2C8=nAZgIU z`0$-FE@+CP@&VSG*C(qyVuZ_8YS(9P4c^5i&Y064KA?%>J|_T{^WCi<0;EQWXkdJ8lOdz z!re=`N{@5Q>?`gk?n9S1gDi1=i*WX=V#9gJ*CEvMC62*_wiKJJjfDid}-&UAj zqUmivMlgEV*de;eV9F>i{#npStyThP&+W)pKKat`6&5T9RUa-Cy7g<(+lOsPfxjXQ zsm)bFP1%=7GuYLFAwpG{O#&!I>`Zs1MJ9_SagB9R{0?;iOY?C|{3WP07{RqWphYLQ zRkZ()KlsnjBu#~A_gNm3x~{v!(fETL1@R>RywS?$>Su}sxc(_uUR@$r{oGZIidq`s zZnrewBN%4Na-cMV#GfqYKNV2$QUbZif`y^bk-syvVj`1d;5CDzq4yAJj_+yCQf&^O z#Qfi^2P$@#A_KC6Ch^+U<>C~o0Y9xBJtKY5Bk+Jl9O(NXlH3Q#cq zK#pjhQGcWYe*V779uOdcve~Smd(Q)6!jyS-$<4dIN>evti(B<=A)G6!Jk}L+-e+ zs|V37?nxlMG0TvNamcgmu!P4low=6_9!BV^m#9AP$RUCd4L7go^ivo2{{3(K#46(o zreSOT|lqVT7uW(z>SaJnsRL&FFG5g*!^rg3iTV3~eDWg&S zcxN6eRu;%^fAb|t`gj*f$2c)Z()sucG0AxX5Xeb8(qI5fHLVZ zj16~aAreXw$nU}nz9WVJr=u zsk}afrXw5Yb0I70uHy%WZ~rh57!IvhtpR-Nk#}la-l#4+SV4Btl2*mw4@pZPVZ{QC z8u+e5)h!TVfi>M6oOmQbp9Qvr&rMZa1jfxaG*>)Nn6^|qFD;7GmjvBPMY8l<|E;Ri zn2{xzAn6T1UDMl_OiNHIeSVAb!d-0~FQIFO%C!*J>@H##uP1}PG;>o;Cj+EE4vq+n z_ra6VfzO{4Yrdb|c?EKB<7!Q~CgxT_20qAs+L!4?p!Im~cZ- zGI;!mM329VSBBZ;fL_y@)#k;UY<&RPrNs#cEKf%?E~eYU>@4nlF+CT1){m5r2qjdO z7MdDL5H1~v8Ad&9NW`ta#`V9WP|~NW@+HCy#l}Z8m^b04H&M{>> 
z@W#DO(Bh_>b{q!r@!HV+`=9qN2MgU=6NF|mE$n7WQe#ehu)aE`F=9#YSK&Es(`(!O z&~8<$CadpJO0!kFBV?SD;2sJ)9V8~{Kcq*z2);rupVG*aMO2H83CL&v&@sYZRi`G( z%h}naOYxyBAbmAX9NAS1r3+{$AS`OiD1-ygh4_d0gw%^urB}dZmqoOzbf9qMN55mi zLMdIuc0t(Cg)ihG=0QY~@84t3rf4NQ^SoBOH9P6N2=kqx8H-3X-82cFT}PutAxyI8 z0BzOOKC_NT1udOi#HCM8nA1>C-Q9m$pQBG>x|EOBLq!vGh!FfP0&Yp-Km z4T?H3Sxu3Z+8QwC4nn{{UPM+*NB#iFwLsV-#b^9nHD|f>V?d!Q+mN@cf7i_cZj+KQbj+ z7C=0T(_AYD`XTjOKcd#or&lq;0PRavx0gXQu=m$|La0B(s!tY&mUUPdGK^&5`Kmo9 z3!Sh*sMe&RpT9p%mD<9(si9gt&K?Bqy>7!$Q9Dh_Y106~?Z>S0_GZKkIyWlj9NDFf z%fLarSZcCzxOf+%0w3>nMk4VN1U^#dS49@wPi=9%@WEDk$VLYjB3jBYas4#)+!IQG zO+7%UfqYertd;=^cF)XrsK5wDK*`dN|HOY=ozhSOpoJ;^6&>MCA8LY~yrI6|q0p}g zUUq&KJ~W|FXA1y>Ryj1y5=6xOE?`-6zx^haQFYZPPykvxDb>>Kqy_35RPAF5MbHA9 z1`pF;Q7`^0(qI`+A)GR#v`vA1br1&dRL_-C;?kHPxZf+@$`SXPi|kXl!2-Jq^-6yI zCOV;1??69cxXJxAu8x9XT#yr96iN%7ZLdoWs<5K_f>kVLAQOL`{W7efuL$|oW`>F$ zU&_HE1^ICtPWT^Ad*JhRAaYuDQC8O7`7Fr9%0)0yL+5Nbv5U%Zs4nNbR-NOTO_l5e(SMofQ|aiwrX%6wssIxinN1KS-G4 zxZ$D<4@oh+o$zg;vNM$D`Q&*?2=O;h)v9UAGIBQ?_S)#Wx15^PU zTLOOfyLE<2ymY$L6yZE>uR21fLV1pL6xJ(a7yFq`F8Z>cBUHi@q?6pNlM0IChzCcH z7oq64r|0yCjAr&Hdm1uZI69 z*ZB+n_vim#|9{a^UPsSQw+RKoT(H`qIR@Y5u*c!{+emh^Dx7jYe=A3>R;wOO=F?S- zAzuf<`P;}?{|dt!ZyK_hkc-Rn)=tN3o&0j)KWi|N#yt7Yz>d9K65?4Q0XAiF)EC6% zCNx*4fM8;<+;Y8yAfHRO6E6#P?a>5e;C5~Glw7oTa$K!EgF3|Dpk>Ij#jC(JfOzw; z3bKNP!&9acco;hQ)cYi!>)(n``b9*+p`!p&C9ZC-J0}ZS2;{;PY-Ip2w~00)quX6Q zpzj^YbxOCWFca4aIrwnL+$ukVY{H@!2uK^7%)kKD;-}WOn7;GhhR9&|I%P4qdx=6Q z{cB^FCY)*JQUAl92L~>70@x#p2|~n2By4T#3+Fv7F0cE>SCLJo(nC+KuAeEtg4yH1 zAN+P#El~b2iR9}6ppstYwfToO4>%6O5Fcg_z&#OlHeuGqdFK%|!n{|BR8f&uF($qs zAAKJxC^jRZN`N!Tw1};IFz$>9^>JWOMz49ZD&kBHf)9ZBvO+;!xfc?j{!j}?;y{)9 zk%^!ed+YLufo&`~T@AdDd4N^SWBlaW-AL4Oa|2bQ`R-HYVUpn$FqkQLQUHp(S`EyE zTahWJr)p(ZH)_E<{HP}ct?#nC!$GSpiz|ocIqbuv9tt8G5VaPMt-(uUNn^m79qojc zV*Fzea4;M@XV0?V5Z8-%P_ZuQXO5!?p@uZC{LZJ-nY;2J=1}SJ5|zJ(Ogenqbt8}o zlVGeub4O4Jj@8?wZG)Lk7qjz7gzuie1wQ8r68Y*Lp2G#VzXAFi73rl>Duq9==sZqF zvW{mh0@Y)gf7n`e|C|5o_l_5MFo8gU410llXohyXqu$m&kmf8@)yUuTnc#)BRzQxQ_X4d0 zLO$W{&{n?RLrp?h?nbL5UQt#^AhQl2^~^@9TTtV);ybyBnW$P@O7gZ|ef{_jX3-W1 z6l`}$-i3X!{uXxfy=sP01Hje#XsQyF+E7|qG1M*uo*ir?xIjTspDyu!PWQ|@fEh0{ zjC2oIQ4W1ciV=ufAp1H;+`s=HKK{M(R&Kj(-O^OxPP}x4&qE1-$vL5;O{CDSLhgAge$f~6Ga&q~@qnstZiV5=x2$XfBjQn#Y5nGI#KOI@CjAoJ2ruovPU z9I0bLq6MH*4hqp9O;-tWVm|eDsm>k%R7hSqm|7;l%MdFX#D-~J>(~_9=y>4+;>k2b zWYK=vqoJCnzQTd((0sW_kE-xJvbDHF=r2sWkdY)kvk|6#)Iu!2+fzaI;n$km8Q2qx zA79Sz2nG4iL{hrVHJ(JkvFdoBQ*8w97?5D~b@d9%soocK&g`c48p#NcgQ9m!2teX_ zH=*Kj7#a_=*?KgsYKSJ9v#yAArG9Jh6IT;#C$MCIQ1CEQ_|ZeGSg&} zpr=SZ;$4UgQl+&9b@J-$wSzLuco79QX@3i4Sl?3wHF8-et>J9_QQltki^btC3TR`I zB{^I~q(<0e;)-j<% zI?^EImUHqu_ov)m@|~vfleeLm_dHKw9738y>J}WQ!g&^}|Ddaw@Ba5itA{X5MqR+j zbW4YGbeYoqd%_Ljs%d7BDTYvGj771m1}ym)>1K`~RO^24WosUUqMc7Vi)+lnid1!* zLSv&qJf&$E%@J4;89^S4-o`+dU>?r zZA&Cg0yoKQ@r7G8@+_`@efocV_^*ghHl1Quf;^_m^XZw%Mk_;5da(UEDxB)DwAVz8S}>u|?(nXAgj)epT+@fBd_q2uZ`mMYdcdgHwNRo_(ocfH z&~Qmz!5wdY!EmYy%rG~Rt@N_IlU=ENSe-<-ABkIM6S%zzrDt7u!?b)E7VlSpS|d}r zHJeL3H@MLLQMB~O^|dh&y*k+4-R|p;7y#-;?M!>4W}wtj`tgE3GZdH3V&++&!kk4w zL|qB<1jDT(!|VAYHTW|L=^}+wSZi9`hSmo8go29q>&G@+!`#Z%YSUip>B?kw-FgV5sw~^=)Lv z8-^oI7b1pkaw)oq5dra-1zbZHjuep75ah@_z-@8dbDb2+YFS0sk3RlAbIUNKFQP_j z;7Wd>=s*TF4hYwffQz{pR5|*J8CbRlTCIm#rU$=L0i=VeeXE-7GcN(548o%8&ToIM z&YtytmVY#q_rig z{pTHulwug=%1SgiAoT`%-^0WwOx~|^E+B1*Z{kgPd+VmD{$D5#XtS!(sZd@=-mN} zb)9^iBiop)806uW(-ccRxJ*Jy&2W~NlvykPP8hG6Z223HrqpMKC>dd%6=CFzExNs; z_UWnSo?v*-BQLtTAg3Clj(e>Tyk8}Bff*eyv9rRQv;kb{j~)mW6OQOe&0M@J?9d+- zwdLvb{GNg>W(Lw78jI>rj7opul_o_BqHQjJVmK_J-e;A+aj8PJvQcP*XXZ$ zAelZzeh4#BBhra`MH)xOxg#jtIM-m*Qc#--m{dDOd;`CoJ^;ccbtd`o7oYgZ$wZ#F 
zijI&0cg>zNVUDR(L!6tX2eW{0)RsXhG?y)2N?{aDi9*ryQ+*t0xYTo8l9QHgi6_dE zAimLiDS!M6Kl`Cjt{cj@s@!TX58sD5eM^Q1(v|@=e9q!T-Is#1R|&7@-GKbe+V#ip zG{4mDvGPmwyO$H@13bV3Dt$wQ+h2aELW%Mz_xofl3^V5h;Y@<`ch)Jbpqf`$iqhyp zcfyo-n1vfS8{BP*(7-gK8Y{bP;)EsCSW4Ch&y!p`s7S*W8$xSF>^=NCg!nMVR zf+dpzoK9C#cNk4R`VBl3^^3-EYvT$=Br z-~H%^^)m38fA-DaeES#Pm7@4>84^(I#&}pTW7{9trj%Eaz1M9PP5s0*J{TSNDP)nGo#Un6&_1UxjP{xCjkLGB ze^4u?^OTQt@+zzUsTjYWn~V(v?(nS)l;5eRJfX}p$byi70c=f84g&&1HVL#(B>qT; zLp3j)Sg^B-ir~%f>bMB5g&4)O4MYjx>YFYz|Jgq?p;ngy)N+8ja659i2Xq^0FHG&a zl4ozbxka>)Bdb5`?tfj21v4QVyZ!2!6b6(qecZV>u!V z_cp?#3V%$hk zaZLfSY{rBQBh;LadVAI#+09FWj@|)6I&RlKqCZSw83Nef^&FK*bf0vz^Vpk3gebBZ zrX>|*9J&;)l^0b~ipWF3qWO5oG#3Vq$;!68i6WSI)OcbFdfmbi)c%o@0(RCJ)33M~ ziWtw_Zx9AXW%GSQs{qDABwaE_F`Nn7kOS_oSK|5p$NOK`Piopd3HNC=Mds){yhsiZ za7}+DGkB*RTG4LwkIk$^a+FT-t}!sN(rPycywV>=P0*7#1m!XIlmGl&b!skhYYC2e z(9@;*ZH`>gxr`s(K^Mkhb`HXy$w^KO^p3hwEUQ*S%hlkxYX#I^!T>zAej!w9gBcbL z!A@X&`Oj*Og|%x|=N1x1Xh#-DyYK^PgCJ=9X&UzE~l$3J}f=_hp$4|`3?8PeMXQ<~BPcJ`x~ zmYi5uOyqmfLUjb7;Jtn28R zI{*oMz-Pqt;R&xF?FuG1U5_0)=4P0k0*C9FMHy_RTkB1w5%`|ej}21-0X<@bv#sVJMgGZIrlY3;<50ScE3gFPz&Wp5Pvo1GnWBsj z0Qg&fZ4&XbqT&9=Xx*PA$jtRJ(X>S}T+vTsq>i0K{kr{Qte7kG6Xb)!zWW6Zv|UW4 zmM@j+597++Qh|7AN~wB@fN3_1EgX)H(RXY-3oxMPE8FqDc@;i1Q%ERs0pjL$jIdV2 z%h@O1dY-dyglur9GYU1=5B%j_wJ9uODzfN>t$gw$(|qUI)|P*Iva!EWA0*7_Y6p@% z*Eu~&eW$q%#S(PtsDDmR6G*A>gqlcsVM;^>N($nV(Qval3md^`p4kMP)?p7|?ND@p7u$f*Lkje~-KVMQ`d)!*Iu?a<|mbgx2 z0OX`_b=3$m0jj9 z5ZTin@fb%rJJ<7QN@cdU!EfI5X!qouzG#}@BXz8#x|s)KwdP^JrP?HMZRI$|9v&Ky@=*IgZJAQg**@Wd@ zX7ZBJ&?+%4M_SKY?*qsx=pnCS<;%~%@Lz|Sn00FyEYn3emWJR#-Vdr)z4OWnSl#hL z88J8MMk{k!Jslebd7T~660f4*>GI%1LS{pMYF|bb@PO_~dlRM5M)i3Y0?av5h;(rK zI}%dUxJ_HrQ_N0(6X^0?)*r8C_3g;f<2kN>-3}T~*Vg|F`|kWO4)npTG3|-Ag$8Y? z+9Udm6#2<<^ipN6krZQbS;J%1513}Nfa7%#hHss0RYVs4OI&|q9s(}-LC(S#Lu*J^ikh zKUaV4I{L*>N^cthDJ18DLo<85Xl*X)o!rd2J>y8pvymY&U5{p>ld1sfM}>!?M*wP@ zVv!!2!9Ys%dIP7OMo0yaU4u5)CJ40UttJV%tP{q|y=WIny8lR^oJ};i z#l(Q~gh_;W|8+;&fgxcI>>25205_RiAN5u%l&7S@yn7vkA26e2?$kbni|8O(L?g_e zGs!9Nh4vAsYTF?^#Xn9DeXY8qRFwTgpJk~M6=NpdR5-M6-Gm$nfgOB82 zhRw9U(W;PqN7~Hp>G6MCKqm6{-~6Sr_{;yd?jTh#Y|&qzzBvO%89|2Im^!K~dw^JV zngZFFJlhM!ybLVe+1z>Tn>41TD-#zN^8p3yx8wb3_PK8jk@#7c9_m}+IQ)FlTZuhV zTE_2r4Oq-+=2j{imIik|$DxpCXi4fiQFXYj6Fr#sGTI;fc3t~F`4g{~ajkOud{jGo zYZg93OFd+`_p=HML@UhE@sQn1pf(o#X;)6IZ>48H6ZoQOpC1Rh;9baxi`j7lfO?WGs^Be|^`8hI_qQPH7)2ha%zDv*}y!fw>KgB=t-+t61TH7;}@y+}s9z7E1 z*Jp#t1eT=;d^`*Hj;aol#ra{RUNk-iwnPpk!l%*M2~2iTV(B^mXs7wfk&my%zafAAheN%yC?2gNf&BrqF0qPojx?hHE$#7yEJ( z|3iI;L}5UP2!^T$h2kMz;Ldaw(g)lkVGj4V_~@fx#i0eBle#;~D-p_4J0{=I-3GEt zk@n&Y50;Pmr5DsW`N2>{HT2dC4P&7IR$MAN;$^f2kVZEjWH8+HhWJ8qUmtpv9O**M zwAgjO!9#R0onAH2^OU$fw6vE4%CV(A9Od0T#Fi~0WAcPK2e4{+UOWvDyX{6iG6DjR zAxmrmVHd&v2}q&Yc57k^s@^4?M@auAi7gJ~@j*teaUsZdJ;gbTjZ6ebcfRQy-6M&Vm51 zi*&GEj|YbdhtyB#H3ri9fTRk!j4ZA$h^(PLxFI=bh=Uf@l!h}wL5rp&eX1cSbwyV` z`LUn)*y=3p%08QTcff@XjkahtFAQsqxE?gj--=&%;FH(kB_Yx z!~zQW{_QvCCFQ*|a4)fao}et?3QMuk36|_lH?Aui7*~}7_ZtEh<}_{+#R6TM3vg?J zhv@#~OE30`%B`7o)Ol=xPXy^m*5#eYn9qrN4PQF}Aa9zrZt=wy8r(JsTLF9P7G2w; zI^#t*(#elcf`RUkP(zyBkES6-#=uA5<%=1>-FVTv&4BgKR2DIa&lJ-ACyZfkrQhR> zafWcL4{b_?o9tk}eyou;X1Uc_Pw|{K*3*VuUnF@d8D&Md)Ju znpUF}F^?NM9tCV>09B#gy{qTYnmu?GkM?(ivq+L4ZNiJ(n;(M(@8|{Pm9JFC9H|ub zrC7?(X*5UQ8U3%pImQYAF5uL}iEd3_dwrC95E%qZuA zI0<^H)n>_y`}fZSVggx-BKf0~I8URr1?PZtS_7H*9Ko1aH4P23uGKUp6ey7c<%c55 zM2`#7a>af0qw+f$&5+$OWZiU8@dRhZpWNoS&g`Hm`K;xg8YMkD;r5w?!*vRYGfDgls?a3ic;=^%UVeLkrI}Y&gznJ-hU8 zy}is{ux%jaQn;B6Lx9b+?}s(lqKKYjh1FqET=ro+gPg3&_nEz1t7V32w#fou@@a8y zwDch4x9EbI#u)WN<-e8+X%y2!ztz(L+}Rsaawa(ZXN#ql12|#!FA%0(fNEOIRrux1 
zG8ZP|>>_wze~$9$N^=&yk9g7|Py;7aboy2yK3Y?KF6WXL=!U>FobB0J6rH#Ycaks3_4 zEj3n+RPrf3BAMl%RMqU&BhOGe#mV>Yf3{l8+Z?QEv=?1dEbLMpUh5+ycb7v=ca)9=#50)$rkiWH0=Kn!lu z%PShNroQ>ZhyO~+{NlU*79+_L8Yv7zxT_orL7E^XeZBnM$K(V%CBIrE4{U&Y>B2*D zKOL+wEh}qn4J9#o6V(s5c(2iKa?Fi|P@xBLz*PHZi@W85|I)0ax~oHyS5}=|dth0x zwYNg$T|hPH;f5>Sr7-rCzpNN4ir2XlQfm=RElcH%h6m7PGM^9*(R>kauFbF_Qs`s2&~$l!L-lIWkro*Vq^XYnCTx^vm#^rm z(lZG=?TTgr59McMjXTZMo6N-;6!DXms$*p7*KaZYi@ut!dEM!;Ej)HD;U~d;R~SOE zH-%Sq4yL`1vB9Kce~L+7cg%&5JWAoEPoY|ms3Abm5Y1_lx-R3Z+j+!rcteDdHbLPK zy^OoLDWJ2@skg80#tag%+j@lIk2~P&w>u7(*a?&0Q)V$;x_ih3Rzr*&O*+Z~7mcm; z)F1}+bP~7Ycq)isLMs}Cdr7j8e_8}8joA5+Sm`#~XzfXH(>^`bQL9BJJeGd^j5Nj6 zvo^Upqp2TxErx5=$0-^8S7!CIMR>fpsPzs(h)~@1r5U}P>Ps{ennW5~p%z-ErL|Av z7+tPxkh%_xFdoM_n^};-r&RD4TZ(~G5|K&##OI2y0c=_MA3tDKWhyR;x+5r1590># z=rSKjrqpE^DoqWvSXo)Ay&uc)O4d7}G+?je&;7dPiI>Q&KGOfKvT;Oq4W#?SQ7 z8ETXO-*pcXXtc%BWDLdy{i`~rrgISTi;(_3!)lf3OvohEUfqWxVsez**hcd@8*j)sWH+htqCudGg2Won zIs{|$aiq8Lca2zE&7x6)G}swkz%c&>Gt3X%eRkku9WR09)@Ij4MEuuvN@RSQ$gxpa z#Xawm8u}{0wG?i^P`f=NybwLM6{76+`sozs$E3`W%9yP%Rq^cnx_7mDcHZOfEJCJ& zwP&jxffzoy8=@Q29y}Rmc7L77gy7mf5ul$$2Do>%Bzf|s*uEZ{Y@kv7t@aAJDyG#M z#V}q2_m(84|N3I$ekXKw@$xli22I-^^n>=F8JG?TcU=(?(7OrL#5Rmn%SH$Emazn9 z!@(hK8&O>$m=i$ST0tXb+)IxF1G0=R?D^sMy)6pk0+GwW^PLS}r-zbl?`^LQ;B~eT zxFp7ai9*wWAkodjVJxP+9=BO(ei)8$!_n4>AUS3oLV68c3$CxfAqN(nkAP0rD~FD&2I#m7AbUOxLgofn4T{wZ{0f*1faBm2rgj3Zh&HOboHWK@o!KL7m|7ZT zhN#Wr{gEe1DA%12#XeAZf>C9}o?RuQy4Knsjcl{YF2Pm?w_?+JKZL($;e-;1!uRok zYBmhbTQAlkwR&*A1@dXt->p{s_~$9>qo>gSigSL0HUGn&D(;0q{E}Fhe#iv1$dQ? zA$%BNRXc8|&lNumw;U_VZ$?(<Wry1Gw&uMevOK9J;-Lop7wg9w4k2Qi!ZL=QRd?BBF};Bu)8-*Qv5D>U&M9 zOF%9(>R2+t_NS!iQWO3B=f#BkyZfa9Hb%D5Y#!T2BTk{t>j^(Ax#dRbRl?-C7iMj-`lC@J)96yZn+DWrbyw!(<6Z*Gg&gp4(>Q+IX z`hKusQRwRnrU7{@$dS&MJ7%Uf(D71u9gPsWEKQB8H7R+i~!nIKy@Br#RT?K zzD+~ceHbky-(Gvg5+h1eKcPZ!QYr8}94#oIFUoZZ`{FxNP-25`~*B6tQ{WB!GOoPcA)2At!&n+X?rbz)N;b# z1~VNx1fvuh<(^k}>$B*7ET}s7cSN204^xJMhbMTv6k2(x)sDA32-_Yo8TY`JvbCH- zor=(s#1wl>>8lBmN&oQ)yfN*0h=*W{|J(rh{ao-wlqlipwFOx1Y;NJL-sI|NzZ?s= z(s+zS1PJ>}hQkkeH0e`EC^4(wqh5ZwsI`o6?xlv!o?JtrYFLfg=*8$@?THz1MB8xg z&Si&na63b^O~_Hu-1S{YZq1GFRTF_F6p;X_p&cJz)A=uJi%2?6XvQ91&oJzsH5b!? 
zHN9Nzpvdq4|NO6*in)VWl;+QMO0}_LERwKcj)x|PQA2`7cnNqUi|1>;y&ttUzdNKa zyXlK5GF=)4COPLL>E%XU+8hF%N#Mlx{IIU(Xb}!3rXJCnyfwSs(>lU?x75csWtO1x zSBlBdK+OR&91mx9Y<>|UQgNCJ=my!Q)m(4Gg9u*GqmjHY1qvy zxRu*kScr=-rmrK5K+hXK)T12h(?f6QF;=&F$vCKS|Na->tEDhcruTcmB%gIm)&2Xg zcc1OozLmM)34M_-5Z+Rsw6G|3SLy@F2NWb-4b&kjg(xF8_R4&s!>CYz8`tyd3i>Bv8;Lqi-yS-+&sB5p6{12Hz|7bd4?VI0A4NDqhm zDE=BHO#WbY=!24d&4izG>&qIC35{xPvU76o;3ylV&>GD9rhQ48IwOERfZO}7mO?>N zB^GimL)pdo%uP*c3KDJZpCfnPVeog$QK7_cGvRF|-2~6FD?nKULCDj7jI;S0{OV$0 z&NpYIPpbxIseRMs6rX9iLKZ{f1X0UQic9xV$IAs9D?jdla$@fh4f%3qrD&s;^`_%U z6BO!CU<|tv#l7giR}HZ*(yH2Q<-~Z#Gg3ImG5RyDp_O?Kd5>G{qZfcmNoz1{<`8vH zk&restWS@6`1)CR0#8xMWn2*R^#!3&Z>UbJzdl=wmfJ&MHJYg7oD?m&FXtC%V=qVb z%#=5T(XQHrK(A0891-6~3=(!vWv7B9xJ_>GhAv>Zne@DdLN7;wT|kXzFD`YNwQn(5 z94!Ki(QJUGA-#AZF8NA?Mx<;eoe42!kMlNx4+}#@=m^ciu@=Varm^gDlHeK5QHrTs z_`nCJpp}_{w&eP1keUFI=XQWPAj{ZKZ0n$;>Bk@WWHligm}j=loFB?o8z(1X!8L-Wl_iSSyFM|L-M$& zyW6ifdAac3*QdS73JrfTFzW4U!i$HVR`}M0{y{$1)$6xQu$3DMZaJS?0P!^xSa9wT z{^u)wjl}<%ix!A*0;Y(|I)K)3xRXOrc05bZjJ@bNkMI6weFb&*tNTB<@sAZ%^XK|w z70L0?A|5+?T~vA!ZZ7a^jAw$`^=!~VL}JW-+!{;+@Ngy?l5a5F>Y%|a#ZB15WV`I* zcyGuDnxLowx?uSLs3>|Jb1QL}E-w1KjIE8egCe!+DzWpu&-AAPsh2!x4l zll2!aOE{y=c)$`ZN}vmG+C07b1)vRX@)EB}X4*{sH1HjVf-DuKc#edVAM^-V|H-_M z(N#xM#Y93{QXXN|M&6K+rjJax%Gr@`C3ohC2uh5sgWL9t@UOGK=6Jvt73+-y`X`6A zv#Z8!8wkY$hsNA~CzL{5YFqx@Lm-0sqxcGoxRbGn|E zSwH#yPrhH@p`*kyB7FoDCgE7bfFk2XMM;y2;hd4mxb}vI{qL;Nd0>{*8_3S2VK@a(lfGZLm zMTtRi136QTtcxaUba7H7t+*V}<^Ng#-X{uIbZ$Lz!&T8ti8c+|Y>Hc|gjjw1j3BZ9 zxCOwF7RYsXr8naCUlFQQG^O4~KLJJ(*N5-lzg$8Y%#VmrBQeFD#j3~-EEq#&KLc>F}ohs zQwGFe%4nFSFN>}(K9GY+M+5RM3fufA=6m{wqj^U>Di|V+WAO)N0cW+f+v%HHgmgL? z_#pKfP5{U41RT~#W}z+_E`{{1=VDZJg9tK1bzclMS?m_*p|VX7(c02;!n!aP1}8ex zK-R>s>&ABuxPSkLI1Anf_z~oR*^pXu^|o933^@ik-+Vy9X4h{jgba6;>(p|VMO=K7 zG)r2S-=G?fGE^gZTA!y#YTk?Jj%b%r1Ov9vR{)lYHJc(2)5O$g#~UL=geLRGTCl8{ z6_$qBh`NTEEm;ME?+bCNmM+;Jm^&=mWcRs_ogl=Z_Dn5aiy2Ty zyr$6)K?XcPg%kZ`5^EUAnX4e6i2O2gYh!0zTOe{UzwJt(V$kQ&+;bKkR#fml=FpLR;d#wim zjD#*NE`-|y0W!O6(`r&C&n^{-?xA_2%6V~gxlF{nv(_co+g;ySefGG_n8$myCyca3 zUjYVW|2L}*k%lz)!sUf4xLRzvHHi`i5SXfu^|r+*IqwtirPb;xtY8-aoCs%3@C1^X z`m0!l-onq&h+P=J$S$x`b=c@oH6;3LUtk4F(D%ZB)`Oqzt&a{i{NQyw$euRcn|D|s z1q!Cq=%zPu5%k6{pl=qHeUfys@&o1p)$3HSVDve6gBw79_>@cA2PW9)5XBHdj|Rq$ zR2oHl+qJAs>ffWIN9#sP$M8DmyS`qyfczR+Q2s6pC_0WYjoj zS;0v;Pr9G=o!|49xnj`=9W{+Mu($?R_@FFnrESD$&{F1hLVFKqf=&0(`Man;kaI?i zk^5s2F{FUwEDk^<2UD(m`dQuZFTecKS5=Cw;FHB@*Zr_CytAo1r6uJ0mzvfQJ^&eY z=*}-3Mr(yCN=0M?TA~KJvO-s0^+O}ei-B8`Q@O<=63{@p>3Ek6LDSXr`GQY1+@c~= zPIS>|uO3BCZDKUej84p%>yZbf0-;(2*#42tnHtJiHeTIEF=tlEE zw+O<6x?#49q8W8IE1!K)(qK=84qn3}+=Tuy>Vo}TVhwPFnpM_U(F2p{7A%?FZ#ao- zHd<8K24J6nUuAu5Sxfxabs)bpdChIa5U(#|+ukm^4W`~fv4DBldi2D;J%i#Fv$%Ul zpdL}OaRrFHENsV7TGu(`P~gNKmVfZHd-1J!FpgwOXQSX5tQEK|RW;KnReR`dyudv2 z+PKNpq|Tg2{#$T!>rK5>;lFwc`H`DfJ(qDy1L3}2iM%&UUlum<5(>9J5$#6yZ~H32HT} zbh$dqVR=;BW5xrkb#F7~m%wkl5S51}t8>{HrJww^w>$tbB4e=LuJq1{$=a&5KPY1& zWCnV$hdxIK2PHSR0krz?>F*p(t%siM-3L(lZpd^Jw!Kz_f`LV}_CyP{c`UWURvTY$ zL5vZhF@4AlWlsKtSnLwdJl^P$Uxnz9`aa0YdIiI*&B4(6>8ta@!&oK`QmsRM)g7J| zieky28Y%c*8=#(Jd{A6U_yHS($ftjCvC#CEu1A$EDFVMDs&1W6gdDeuK^mk{y-3=) z7#z-R_G`B{5Bz0KKfuRF8fB^oua5vw1jIL5ci4JAaz485(}?nV{*NB;W550X@Sm5l zn&tGZx2hLUjr?h6caZvu7j<(cHYpw4NhZUQ3zWA;wfD8?`R&`Qdd8D|TR2F59;4tl zGTjGx1WdOs&E+gwnrK33`azHOH+sAmvA^UaZ9mH+h98cXTI_CK{&HajzyKJV3Sv>V630<%+48p+{GhFcf5sHVX8i`Gw&_(D%T1;xlW=bFYAZ<_ncon z9PeRDt~%_YjOn#zzOh*RF2;yqO?InLfruV0?PT!hy;gR5NpGv>1s5XhS_bE66uBt?W&lT^NXMOh z!-;so7@8k65kLzssFEv!D&E&?G-jtg)Qt>B$+->gaEWUxyl302NM%c`VDol*(V>b=t_wGtV`I7H&^HL2gk=W5JIvw 
zdg~k}@KAKP{>Aq`^2WA9|6EM_g~RwE!Qs=o;Lh*XUegc5pCT{%b#U%rg)2uyyOOOhC+WTjCC+5V0#+3}na8(WFgKg?`l&L9AeMD>%teQUHWG&-?R;=0#(s=Sux` z!yIG@L9j)aun3vA8iiR0D=@VK6!-$@S-&4=;}#~3qZYS`0RGI4Vmg?-^+fzG0)pNY zKgsqhnM0;fap(GxugakFC)gqLi+YAER-58>^abD=VMXnGhC7Ulq@djyeccn2bAU=i?gT zu29rd-xSn3o7RD07J#kO;m~hkDqWP@lJa}#1;wKM&mDqBT?n3sNalM;M# z6;#*+gMsVjJH3aEf<{TU)d$p#)?PDEla@rpj`myVQs|3y(hTS@;>n!zCxsN4QtUa7*f+O=(8(L)d~()|NXp=PbcB#xArcucrnDcfA!5@ z7N6m#n76S@PZwSR?M8^#gkV{PCIqX($477>335~S?_VK$gVxlqpEg8@3u^0Auf)(aAIOu-i2D}f7`xfCKsdu-Rp#BaTWR{GsPJ9+`W2FcX zfnL{hkDFKSJ+$9gUGaJv4kJo4rLxcEXf5jW;0EHu7PJH!uwKI{Z%xpfLadi7_7r^A z<{4&*`vI5Kl517S?DYu7*D zO@o{Y{CzR(~KOIg)Z3BVE1myaWEzbDJyJxRbco9`-@;RIaEtl7@<=r=(oww zKwz2e<($k;5m`Qt@BU*m4m0Jk;^JreP@nfm%d>;EI{cL%_xQ4sEdi}lZQ%5lP<+Pt z1ZDSA;a;?W&yEPAyZeF6vlB!3DO+_kFRjz+ky8J)kIQ)uPaOPRS5GnMyGNLayQg$k z|43;eFNd~>xw@?>QQhJcVy6gX^8ypb<^oF;T}F@g@k<@!}2i&|v zmf&UhU&o8X!xZkP^Wz>SKomGbO^|{{ZFx~?sS&o>P{4^)Hp9;x1JN#L$>lt#R{ zbuf^4v;zIGJ#hY5fyxj}Q;n?0QV2`n;JWoXGow<68?l}diMBLh#%p{2=xUU*a$R%= zmR2~F0!hf_s|~6pT&a>~qYnTIua@TOmbs(Vtqrdn6QnukemgTlBQ`P?=)SmFtwS9O z!W-#=O`V3*W!xl#-7@=ruBL>QBp9RiTjfn%)gOJ4ecJlqL~mhDIF|Z)3u;oa8yfMZ z0U7*(DW=m>@8oa)r62ZKL#%337(SD`TE|cR=p)l+kDLx7IvA^o`8eg{p)$#vd<>d_MyU4NYCbV{}rFK7`^vVKEiE z>(O#)0vQtKqXSYw6x7!)Wco#HOQRi@sudnO52T&Kh0X97h?DEt8*hWqM#2ENNJmlb zG=0MY>zHod1}k0y&CZd?KU)Oo>yp|xO(-mekaz0I2DcrXuYf+$y5o%6E2&15*V+eM z9bSkj09f10KhZ#C|F}WHO2C#i=oMAtzmL=l%G4dVFjUwh7|s}ZG53lI)h*s7Ay{-6 z?QPa^qEf&1;yiVCcB1C`_GRkb4j@io)3{NP)-mF zWkR*Nw5b;XtV8%UKZ%(7QjGnG?nAhm$V-1l;|B9*`jXQrz$%*Ak%)W|!OXkIy%2t; zXra*$UWQaUHfSB&HgM})AVzuQq~(L&_zh3&gTC8C;aEXLLl5bZ*%yj6nm*hvpPnDPP6b<5&e37O6mV?2)it@ff_>bCjD)zd-QmL#pBXY~R@UO> zYk)f_>tf!Ol^=Vb`#9%$J&s}6?%yk4VNt8#d~@af?FIqs+oM&2+XO$<9AxZ+wzt>? zT_mXgnS;nkT$L=EDV|SE{g%%q2<+czjInNl*FqD6yEN?({a=`U5i!8kXeAZj;Oavj zZxBgD^aU~H6QyPSRm9=RS8xmbPK6V)^qZ;-hwW9{uX@K~F||Ow|GWB$cq8&OXf4eu_pO2U)YDDp(Gzr zZ3yG|RCngDdk-DbZ6Uc3OVt^zPDp7AKb;Runfljrv)Ab~FAZH$f+Yp9!kq-idyxv> zKtD8+V~g(8p{-`GN)Rk=u6)`M`k`-ZU);Myaf{T;(7eYS`O^0V?O_q71;c}TF+1IC z|5H6#Gd6w4#wtmz9~EtwE-oz1UlQi(`nMiH$|Dff4^BUJP*)q`WSR-oQ{n#Gw@yNS z`xp1W_R1+%5KIs`ZN}Tf@xep-Jnd(_p}?Mw?5DFr|DFdU|m+M z@Y@oxu5whG5}NhxhondSh%vNau|B&+L_Tpiv7^BG#xrTQx#a4;d$M)O5j#)4Rxijx+YFeNxEs$>US%a{6Wl5?c6H~c4V zNMyWdx{N%a3MlRsYgd-N)+exsS?p%sEf0-x^un5*6QOsa_A{k)`3t{mv;;H~> z-~Q=8{OSMQ|I8qRlvzsXoG6x^9u4y}bsLIoI9hI<^Yic2uT{X&gPC*OlA{oq4?%Fn zl+20E69ofM1R;LZO~$QB_w2EX6Z1MiF>!^$g}0TPhY_fKloKcIdsP2 z88-Sd7fR$2t_{rKc2MYP&LJsB0Vn{~4WMuesK%gaMkqx4+18`p+J`rhv@J16Qxrwn z5i{WT@^9@z?zL*DH$QkHc-V#iZSUNcMU89@bN2dR66V(4|3C2*pd#!ZwG8v)<)QyXJin`A&5 z7c8|2(FXh?cJw?z5()?abBtA6xk}x6AB?a{ZymgL77MBp`1k5HA>uFvi|(kLy;{}L zEVz}HXx&YKsUQb?&WZ7#&EhB!jy0|e%mRk7zCt8&+skp_f)QINAsK+=qbJcs6Dqt> zd41H6T0nW3C>mi-!pGcNPS&@$*k`wo52o}F$~V_LP;D2vv9)Uq@lP z2{C3l#Bz3>aZIDqXN((A8cK^^stgietP1Ef!78$Dm@O_7Yy&)a@viG$OFqA*eD z=>y_O97d)Ok_IT5y$KYzwd7fuj1Z7!O$4xtI~JkPE*CUv*+9M-aHMi{i@?uc1Tbk0 z@i@M^FNf6Nwdr-pi0YY5=U0t0#bbp<>?vx(k7S3o1IjH-LzlQ2owY6J-Gk={e=B3} z?)H;oYXwlSIj|cp+_Y#qXCE0-)f;Ipe~%Cp;jyHXD*bBv^e2~LNppmDo@lARapVF_ zN4xFX9D5@!!anTUzSzq%wfpcunnKe6UR~oB$TS>WI=(^u7U4ho&00&G;;J!Klt(U@ zN2v871+)hr@eC|Fc`%m{d|}P*jHlB{xAz8MKx2cb$#jfwcmJR^=Q96A(Ia)3@9K7N z2+k@00v*+pp#i5YTXs)Jk7=m2i0Dm?Ve2>kJ{ZaKdia2w>!O24*ScSAlhnETqJqTV z{iWIbu}ei=ZiT(OTcIV!1YK^1r&QywjY|?umZ0UpJxZ%)mGIa}$JA^cZtd;XYSGAR zI64XCn#kgD6uX^6CAx3}V^j+Z_@);Bb?iD0j!1+hw?zm%(}|Q^4zpS&syk)4%@uHp z2*6Abmo-gFnoNV69t#b2ZUQ5D&G}GwgOErh!Oo=n>4+cnRp2})SD*&8wYwa^l>x!2 zMRNvYvX5pqT}V7+JBgQzu#YuvEh|_QnpJ37%vdC@vQ^N`O&}dJj^wI-Te^=v`bXxM zQ%Hlr@}mvS(6JNBBo2MM^*aXfUDQG4Slz0@qFCNiWYs?bLEVLpt&Ub*&Zt*=I{XAO 
zlce#;I8H^jA}Y#CKxgnDs!V?-IjWivK9#zk&9`Dv^w>F5nMFakx9TT?BdHjL+Piqj zi#KOCZ~X#*~Zg{H|9;#GAWhaZ0w-358qw$|w+i z6Q;-Ru>1B8M$uXj5Jqp+S`($NXWE(oVeUX)6hW!+Qa36(^=<9^H=zI!&&Lt!jB0#) zShOLtXwS9ajvyQ@Ja1=N(7;Qm ztwKySB0ocvj(B1)ZzgBzQ?SP5LBkIHt!3%do%FPsgc|rRQCZ~OVY9|p6a|1MJ?=NN z%j<}fa0=s3P%OTkZg$KM3~nse`7-14XBQY`UF%@GK(HF{30pcWA|NNS zZ|W*8&vCcn@O-V9C$5JV{on>G+fNr%!(Px}>*b~=M<^z#@iiU-dN1UM_Nx64sEbtdk3-F3dX7tliw=$ zh`HtwOVekH(f_~H&2#b5X{@R&Ia!V&fw}#)hLhFK^y`5TU|Q$qIicWX!`c?dg+>a1~f1TTa=*4?*L5h`Y zOQ^{qw%+^tjWKk4jwj4oIl(HsUTXrgZ@a_?Tad$Ib1wFzx|%o9@EkFsIues4hRH>Q zQdC6MEooE=Ze&hi@3(pcS|Gn}ZFU=x-WION2<9&AGYl)09C7b8WJO8lkACF!lp%r_ zG^{seR}q3R%P&3s%tP2DIB5nE*tbO<>uxrS3qYfEU45HvQ)YPPNdfWK@mV1yvu+c> zO79l6R%fNXVB3n}cE+HnjP$r-uMtUKHI99Ps@)Ou@yAhA0$~Nzww_UXk0*z!67fz2 z7h4K~B|9Xqx;6a`<7uzyPLOxjN2kLODPv;IE59%w|4Ha9IwnnH<<+_Xf!nl_nhlaG z_aI=TR*=_(5;9@o#ahAE=*hI!sYJlBpW7tMms~hiAq0EaSkY-W36$VBtH~12?~;f4 z&2VRWG6Yl*3=G5HM3%NNdrhSEJ~KIiJ5F!u>I446a~bF;_CYXcma4z9&fqxMjynzo zi&)KaY84e0@>Cdo=Gr}OD{lAqlspu+JsH^g-L<1nKKf1HdoVAw?myg+>CV|yAIG?( zl}wR!nV}-e>!MQs93sItAq~O2{dzQ7aC^RN_DyFeP#KD_6eX2()zw0h^99xEGM^j! zH6@j!aOS3)sdZQO;Dap32bqxAAbGU%zP*fY6sAdJC&Ep zDn2-p;WQ*ty&TnnO1ibUKuYF7K)-QxjiU>RQHy}+O2|Y*@1j}MWddl&=jL`%kTxG; z?&1(4vWwvYCFU%EwszR*bvN>q_(W8r+amFmK=k4vftB9RRh86CGgGl!ZI+rNR)O49 z+1CI+Boxj42q9pQqA`3C${+DzoPF9qb-0YWRDqoAt>~<)A`fVPeQAYqa<|tTmvAMn zUyOqV?$ursP%3`kYOBWb;T`M5CxzCf6yTu@?cTIzdHLiDEh%aSYkT!^^h#5T8VaCU zy4sAgbP|ot#fX$vLoM}cZQamP(cJS>%0p_Yijpe!>bcA5m6qOj`DSt*P|QPFDbVf zNfVaADjATypZYzt97fI;P5FFHWJonrPZ$p~)mV}@bvmMO)i*I|bPcK#6xoBz6P{A58&KW1XGQ|6lt3 zXE<`Yc>;_&ydjvlN>sPr2Spl>q@RH^^1QDY#m&4XI&RF7uP`}#hW@L*Z~#4dzQ}U$ zjTZ)8j`~gUyLoRsaFAztKpiv*U#p>lNa+o1O>a+AJyldZ+q}T3>MOK}NFunGTjr-! z((WM}PS5aPP_{Z&dj|vq;@f%wCt3?w_IV?I0Bw6VuQ_A7vvPNNPz<^B{Bx5)(S73= zX$ZQ2MW!}TKzNsyIzZqI-$A_(UNMc<@9ojlq3iJ$SSlD%e&XONLG^{y@Ch=ivdI?_ zVIzEgMbUCek2-j8S5M=>$Fc?mVHel>?&`Yb8k8i?+x5NHHqU3qzA#oWR+=GH#_p;H zD7HAc!E0Td)+n$xzufnP4`p*pK$<$+IB-iC;P34_UA%Q2rEIA)7|t1h5BW)o-A7RO z=6rT52$#2%QY{+zgio84>Vr<1MYO~4xXh6ei8I=z2bzjdnOvNWiokjjUr)x}oX~bO zGbwqN|6}DASIcS24iHjoCv3h zx@i+&>GYlud#Y1>oyY2?t<}R?h{5DE z_ADxLbfB!#MJS=^iUq=d?Y)KtcQr30m|%JHfl#A!7`2lL;~9*Js~(i;S-Mtui^3ete4y- zPBOagQnGn1(iz=+0VqNqZNsw?4S`SmQvJez>$^&7G8D6l*k6FqD@OwG18)&`e`tV_N(KQ1MV(&S*b_~i` zdj7l~nX0vpH=5*hu-XyGAyU7BUI0~U>EZ9dCv1@wwXo&(c^3Cft?1A_Z`4YF6RNK4 zWQmSVfUXgMSRZn(dUzyA4ts0mr5E9>L@dU!u!~DDmJb{uI0XVB^(Lk%1EWSjv7lN= z_8P*hdF~l#%EATK>_wnu>yj`4gQ!9 zMw-_eecsbUyW_haGo9omKZ?58kFq?AW;(}`P%mQ_ZL}i79>MCQZ%!Wcb zE=#%5pa)6IFkDR^-;J27*5j`8maw~=3O5#DQnduAY6g95L!8m+3nGzg+`BWAC=kk8 zPv-67LdnbKFf0p}hIRIzkhMc9xTFW7Rp1PdCBZ5H`k`ME&+GHsBP(x#vo_H>Zpn$P z8K3vl2)lrCqg|CF860tAI$`IH!iE6S z>A0P@R`4JPEVy+m-MOoZiKOC$JgDz zG*O!=&qj4HQm4Tt>=AGkPr#lS#VRV3V%{hy#=AcW`9MP}j>h;v3jmuVX?2a=9j-%k z=OeV8F5B2}+4T30* zKoWhaOTYp=*iFm|&xI33wc7lL8^qkgL`5*x(c;0O4o|2Fl>&5h5(S zL&V-&j;%L{YECF|)9XRNP3KXfQ-G)8ycor~wO|XzxdlS(G`8yTn0n|7`R*!#gBv7J z7O&Gc|K-F!YMnt&5akzxNbvizP<||*nZH#R_1v};afKk3R$Et9uD{5R1+Z@RP$dUn z*@@Lcdmf%-rovtWRP?C})F08Q@wMGLa^s$iCfy(T^vQE?1|=Csj0FaKEX-jY0U3se zOa*i0QLEu;9X&B}7=T@2m&Zn)U8@?G$@?SY4YM(W+9)Wl-Y5sdbI%6%01`t*7V2IS)O_#HXR(J{ zC?HdwI4m$F&Ci?IZPW(M^qJ}T{q1?UjpjW+iVm1wM>d0-%T-5?U9O5{g#xk9w)W># z4o*;o5W5@F(Yqo}>F~=V;r+urSY5A)?(MGCIFx{1lg2f4w&*dDzoUo4=KXz5K(dnn z1SSp@xPmrIjdPAfT;EyWJv2KIU_8IQw_k;#Go0OX)zo*LwqpFH>a9ha9-)I8T&KPw zP{`!!9*1?0;8Q_v#$>$Zc3~X78bu}jN=P!?Uq;U1LAdSeBBx3JsGL40A=rTA zwnRcb98L#z@FnQ|S5Eo;rM#-IjJVs?E= zvv*dhSZ?X*7yX|vI@)4S1?)Zdi&zqsYl%IG{XXtTup;&ST2l}bFTb(;(;&VIQWFgJ zUx0MT_V3SA01f|TctL@-pxzZhMxt)*&5J$+IXL4q!#7DztNQo&L(S{TUt;HQ#K39P 
z&V=`>iAex3So3Xaik&*`QmKF0hOw}#F?r+ADVsmEDxjf!i$B28ei^e#(9_eDNMB8+ zgXw-@1uWLl6_MkrXYi#(&YF~^b`AG3gEW26L9xNt%8ORZo7y{oA6m25LFh|k>x*Bt zg~4<&Nr#56nf0WoqO2+s?8wp0s<*WR7zy^5TN(L(MV+g8pK2&k z&6@zi;lI8bUZD9457EcR!89x-597u%$&^K>G-9J$MnQU^5|s0;fQ>cbk&0H^nT*mw ze3b#*?NWM2v!z;H@#1Rg<#+qpx*Br{n7JqBkU;wRKmLh7`eIldH=F=@hvk0J1UI$U z;=+=NLR(vSIq|c1AK#GpGK*b;Llz4KU!pgaNRA(ACx#V2XMSAYfJdo3Nn3vW!MDLo ze^6Cek5U9zKhkw(ZqZw#Vb(uQsocP24i2N!0u7slTX23KcIxzlBD6TJaruMR{;3Eu zz)5P2qC9eZg(yU4wNM6m&smy(xvgB^vcgwGicIpd37}1-chHU@W)pQCtlq*hTC!3X zD~trMJ)ozg{QN$Q&>8N?zv?qtgRqAI@_-?{DfEcu4DizGt@b%Hi&hGf36jJjl@Y>X zU{A4c0?@PGyP*d^|JmK&fBv(e5v3qTcPrm=UJUxt`=3qYn2CJ`lMuU;ALk$Uzzn76 zm4}kn0u6U+J0-?1-^4ml>^CQcGui10UQd>t{bn|TWKvCy=`e8C)u|}k^Zk%G2g#JT zMwEd!{%4r6-jeF!TjmDYJ~vTHG|wnsps)4~edMaiH4p$+2PL zz-cmFE5|x#G*>>EPtX8yDH5U;^B6jOABBmX?Y!LDGV`rzATG9KybQIzxgMwUgKwLh z=!3U~x)cM~I1b@U4ej)ESEOdRyYYZ`2of%QyoMbN%dixhuH27-Hm?cP86Q_9G0_Y# zSL|h_32?_O9UT;0>~DQ z_rkvSz3>@n6<9 zw_+Vf$i3oqtdbNAF>p93h^tkBkc12Mu882!KSy#hPltfysC+^qaB{J(hCcDo$w``C zShVzsDt7Wq4Vb(s#aLjqf!7L&5vPM9Ibn2a$V7xX*#3ip%oApf2G6G1^=X^NUdoo} z2tQKXpfJfR$3aHdgvPzeF1WCU?Q_^vZA8H99H-AjfA3;pp0^y6(=3{`g{lgItAA4Z zgtUz@M^?Df<&-#jFet7U>5sTulYV>!eN^Jg8Z0lju3#fu{cKK+_FrTfMVaW)i#5(ldDpi~A%>2G|Xfhy^Q z;wKB~!YtkR#Vm`$Z@gp;Qf#J!A3~k#WL)g%BSqlP%6B@7w@O66QP~xBvZ*u~}td=UTEW z?IA-suH;R|MHZhGoaoN@^mJ@dGI~2aV<4yK;NxWlDs-%fPe!22sw@1t&CBMqlPAhI z*NZV@6I|U84jA`k=cw|1nwrJUQl#bJik`c%MIGhH9UC=abav|S`&lQ`uKj?1zBrfkOR?^pc^tc%Ghp493EH@O{($nIJL4A_>xf7uZ zFyP89roSvP0M_Ue5Ll9|uz#_M1cg*_6NtVcj`I{G+{=f1%HcTv0QhAPPa=^$8#NqH zI-!z)SI3Y2S#m8te)sVL`9{RvywDqsj%?qo&bnE^Zo%(6Srm0SMwuqXo;^!Qz!tI} zH_cbGb+afN*wvjXl_LvJg0hoIQQwHrIZy)95`~`ZxI7}-kQW|}qGYQ#Lg$M#`G_E- z#zzxfX9D2hrX17(j(Y3QV8&RZL=2tdpdID!U&J*dP1QHU(lG~X>?|)Km%A`8Cf+Q^ zm&@nakepucDi}Ziw6*!71P2G0QN&~{PlW1J%T@QATB7MyShxI8bMKNDZTzafs^BD;f6kQ*!;{rOcpw?qZnfY zoCbbsQi?Q#|8h<7b5Ob8nhcVSFJ}v)bNIeVruhCp{f&QGTHGHBe}VOy_TKpLu(+zs z^Q0va-eJ50%VKn`b{s{h&@7M~oe!*wQEzh!8-XgQ3ZA+iF_48dQRy6uB1o~|SE#X^ zMuC|qit`O&w2LCb5Ab^7v3SH@XRwj~Jy-Fx4}z&Ohg4mNjQ3?{!MGuiu&K}yCB57V znl+6m-m`|~=tUB7r;JU}oDdmHj|!w_6MHXI0kCnKDKgxP!OW2Xvxj1483tx6s%z^- zt`$;w#+zO*1M|+4IS3!m!5+!cpK456EMC>fegz;z*(m0$Esj2x3#Oq4rPQ&wd{xSi zP5W8mCw^hdfIPatN%8#8|MJsceEyg5{0S;2gT~6tzVmmfLC_?vD`z&Bo<26S5w!gE zKmLh%NAL>)xnh?@;NHYvS8+LKZbKZ<;zAi&vm)rSiMgf{==k7HXAn22U;covsHQ_uiGn9kf_j|NGXW4A{h?}2Y{dQ zb2S`-xPYHP4)i7!3_gKBDr*JKi!ISBsm40##S2540)1gh!4(FT!Q1AQ`q5YM5O6k| zJMjZe#j1gM01weoy&4+VG&n|+cuhdv6%CK&w@B%*h~J(DJ2WcAOE4f0vOeYUQkl@s z+`r^}lCfpF32WP*-8`Ps)zrgb>S1TALK7 zjh5?0RBD)1?NZYlmL|NIA*eCVAw7#ddj60QEKb%n=qgN_xK=HoT{3Ph2`JOl-UlZ2 zEwTI{D&1hfX51w4ZY$UU=9*?n&y~FP z=NhK<{i!nSrZl)veCoV(!{x|Y=pf^y6e$+>fkxbAA_8(nGVq#^ zOC71Su(;%lD4$2&qiUi63eHpyPO&#Pq%;Z$a0w|Q#%g6fH|jp~Vb{0yI#CW2 zoftZ2@1H?RPT<$fFQ~9(o?xlK-i;pAVZ(q*2>U1lF*Rm~{VkQt~j)`Bc zZ1{}Hu~aW^1CShvUvF!$ylTR<*^F2Wu{g51^f@?B<#GACfk7rb_jXwg7x|3s9Z80Epczb zw2vV&_p07{JpGb_8}FTzBQn~s5hche%FMf^$^@Y3r9}cPI&rsS0Ihy2iB*-*`xgU1 z=7mR^+Qsb&+h)H4&}BeODpZTYp#z3gn}TiTxI)OWmAj8W`K>rKa)q?LJWEN0*!V^R zxIT)S$X^jG)F#)$2aFhaQUx@@hRR6WGRIZMD2l(Sm4rvsKQF`4(Nj1MW=5gK3-2u~ zKJlXWZm+IR(jsa=^CZt<0-Tv3mJBF)Qpc97F@gHuS8cv|I7vk*e<2r5>!gB}CR{fx z;|;h~$X!&r50KoNX03QHOgXGFKn>qVS5dWi_i=9aBR}|_+|S?Q>2Z3W|LX2v$A8lt z;J_@e0xaBig#3qOL}IzR^21_cYIe8OyxLs)RFKf8{!s95Q^G-ZY||D~@V55IYEEJ? 
zYW`_&vE`L%c}$N$Uz<@Iaa?s)t2>Jq0Ev-6E?>ZDQr4$lmT%&lhx;_#h(GviT@ zh|Q80|8UOm0RRH8cq6#LV+!;v?cY|gYipHMIC=+3pDt932IBZ}3bFGK9}GHL>6VxU zd0KRed4MpltV8dAZ%Pl9g6P@28J%w_Cgi2^3~{9t6dUN^SeCPDQCEe)eVc?wJJTB=sg6B&xOvk z1l($StA$4oP4>&{TT&6!Khz;ghhw^tDuQG(QUEkfQm}Jkcym=RBwOmNIvU%Ms__&ff3r;|N##o8U6iDavQ66Wd^gC?hqB-RG3a|Rk9k{}4O_%I_v%hZ zw{6-{vWij!#UoIf{-z|dfE-@sY5w2`Kk+fVQfYLoNYW*iL57gLgelac-#+<$@9Ft4 zZ~JpXUoJ&|b7@gSY7aq)48!z|XR)E#jWNY&FCxHQ(HwpB4|m0U#kdOcGP7uH<_yLb5bg?qnU5IjPFty<^&9PJe4Rwb-Nc9OSOD&im_`RB=(6=seO{JFJ(p-B4PHyP^D5$vh-|L zDiWxQpM2wAoTrL%%~l*B?4H5XpM3EhFJuz~N%;f+2CPpL8}Y0yp~Q;`DRaNr`Y*U^ zVs4)xZR*LJSr8FD2ir{@SkYkn)O!LY3HqxWGDE^W)= zQKkiO;Nf*S5K5=a407I}lpP5r*LX%~{^wMJAs_`{=AHep00BuHl-B{1(fAQ)646ep}ot7te z_2Oditb%l#l<3U?s)v&Zy02{)gzqW^@)GV|r)o58vm7xWy4V>Ue`Ynm_}wqQ_|6x> zUUwhgGJ`!Wr*p$@Vpc<^d;qGTr)r2kF^^6z4ytD8MD$dJ;xmN@WdIfZxr zX=QR+cF|Y8QFc_j_Z$7;ozX#7jgYbd%@Kd&hZEZ@-}}x%fvv^6W#i7YL zX$7Y)IjpI`iKYN1lu>;rirIa!t!#9p&N>GrKzO(ZYZzVX89# z8!0M?YLoy9C3gTevfsi@N#@9aJ{cMEr-nSzqAUx}Pvc3I%C*i=Rs0B@vhqv51SrpY zRc=LqGw*ez&-h2RkxI5!3upS~F)+5A;UUG1KG8P}hcMlT>Uo;t_sN4tezWQh#xPNE z{pUaZ^lwZ>3LgY}_LE>=&M8jtYE*iXpsAV6B&ZE}`RQ+J>?;2|gC zyS-h%4BQp{#Z82Dfm(v`)%7E-KJaEGaL{^|uoA0F8FnMSESJeTe0JRsdKvMg_y;XI zJ5Il-H+k_qKyjTaeUpqP?$cG2josUj28U#^ZRm#O-=$O;2a&k zzpP`YZ%II!xV;foNK!!F9|=cRRG?Tw$2Rz%zjny!(~RG-6{YZkZEq2jKC2!_RU>S@ z3G>j3;lSA4p((!I!fR?ORz9*phc0iHfZcft4fGIIcV5^U5rh^-Ff{#9IR7w^jhT)$vj=K0u4K|62_kdpXIGN6P&l)ot zlITM!6g_=aW*Nc1#IK>i#$hM?xMSwRryX4scJ9*>?HkFR-sEu3I6nXpm~U-?iXtjk z5$h!Yx4DyL!kina%-XMW#cdcdh$crvOGxzC_An z%5TZNn(D2G0HTT;XI{srM>MdkU_YpaAOfl5l`zd1R36Q7N?{i<$Ahx4Klp<`h{Fa} zg*5czdXp)2qiKyZI9&I{?G59btxJg*?# zzV512uhR^qEdRTyq?S75<#(iB3<7En68l*2)kfuY0*yxHwEt2 zTVc>9h~esdsI>7$oq^)@a^=oS*|G)?Gj9gd)z;(iO}0*)>2Zo!LzJLf)D1Xj^9s2g ze^Q!+AYuIJ?&BZ-v6s5S5bRoFw6K&lbQhD-CxF|k;yy1IEI>(JQ-z6DFc;GzO!~8G z%C2tq2R(m(jiOicdN)gWf&p8jzxaWZtdraiCc@J;Q5>6WJ(z0FN$oP!TcYFwpwU?t zSO~f~=g?3?DVngt+nWw2$OA>rq`scU4OUvLG;l6CmAs}+()@H~OyG@Fxd^ir@|Mw7gY9Pa(( ziLfhHpU0H}zgp6a!$TUG!5MSFZRMhyP3)z$gnYF{;l&A_$U)^i!k^%E(i&t044?v{ zkMWqb-QG6+Vs>}Tt@xy^QahMp>0tsmy8H`L^vTTT=22x+;<)it>@KxBs373IIm?g& z+u}8Eqw`B4!piW-mzJh!GqGtsF_`n^aNFCJ4q|@#)#pDC&Z2jX%3$J0JpL0#xci8d zZQn0RuKSQt`2;1Vgfc@;5vv82U`E}?PQ+?#S=ac?HPPmZ*u;bYxEQMwq(7*n4ujz5 zDhh$dQbG*J3TbfHK)dm5$V~mXkBZoG8=q=&-S1e;6R+GUOiED`?oftTK7IY^mwtA? 
zGbpC+GM>&f;ccJ)m%FcnArOF8f(s ze6@d3L{vw(yqidcq7N%9CP^h@3Qj3b0)7zo)vWcENk&p$<&Q-mZcrQG1!7`mPK&lR z&Qa>@HO{@L%r!OSjQc>P^Wp>V0x&avn}4RgC6ya-^vX$|ouPaD?e$?gD*GxNEMKL+ zQ(Xra_yz+#)=VPtFYf*~vj7ymxNa?|3%p412tl+`9^HLx%4$y7(R5#7;b5Er+!z0i z;K(QjVWL*;B*Lc%oC4=l*Vj&nyujS66`a{L~SBA;Bpz|<_{3EeZw*yBg!HgT41?cl7`0)3oEcc2KUMCUG7o zt*)yaB7vR#$$PIIKdmqm9c*v%YNDraZhDgxN!+O2UK~F~R+fb1F4{y1dsTzUw5m>p zs;0%L1ylu2tHK${XU$sJ#y+_k*J%0-Y=R3|%O0gDsAG~+2{Bx%RAe`{G1x5wwB_=^UB#{Fyvl3}#>3Ptf}qTmRn;59Dx?payAc7E)qd zsW2mLkJWKu8(D_Vs^Q@BRnu9qt{NLY9v-TBp2@9ops18s3Q?%PyZg(a1OtVO5LYjP zxws{FsN51wrjg$ql)(;sz3WSa7bcKUX`4*Gwbky}v7WV{S+!fCx1_>s>8tw4EVrz0Q_b)!EpJ-Qr-;XxVFgOH4(!yI*$*q`_JD36Ze)E+cj} zH#hxVmGY^bxf%Dh^GNF`8vsTtF*@*ASF(;*-GY)TaC?w(5MX}aad?2n|y*$;qURa^=?x5rD9(3HiqSK z5~c#F5Ppt*UGczp8&(aDO`$xv43E`7Fh5G*FkV8cMEbW(kY4dVjc0LCcSi}tYNc+1 z5h>0uonr9ZNo#t6eZrSjHf|uFiJbR?e%ky}2)G^s06EAd2v?pK+VoprLYx{GnH6)W7j z*2P&+Qi4Q^XF<$r|1oj{H?l}ZrEBPnI}gR+FgZS`Xo~Miu3+HAMq9LZZVdhI`(mF6 zrg4_!6w)G-Ts_cZ88oB8gtNZgi~Nvku`+`P_?H-Os=IA22Rb6yl%^6dy-o%PB_@qhYc{(N|C*bH_PQ!+cbA}DNho#S-l+wco;S+@_qb9ekT#k6G{T`A!FN!7*wUV zO^S^!BQ6s~ceQAPz+|y)VkyH?c+;U^A8ROug(^$${!Pds>Jsy^s4WX5^V@o(SMq0$nA7NX@HimkA2_bdNmA zP(K%=kiV!3BD*e}P31;-H#ET$9x5FTWJBv^VWGC(I!FfXA=KA*fA87ImWP%@azc>DVm5~)lJF03eKmX2S*l0#%;Qmj~^_W zTZq${A!gY8xzQ4PCupYFHA&M$0qITr%Zt$zquMi^R1RVvy>q_rlRiU%;xjh*ke`53 zR>7Zzx!U{05xwRpy*#S&8UNNKlz$24mnKy21-1_gX8W_CII7&lLGszSDAxY&*I~SI02QdUDzJseX2sI z5hv^yRG>^RZ6v(15C!7dIv%GLoNy1k)ic^}%Ix&>1$WM22Bz1{A1qgv){FF6i*tuB zh`bzR8U}T6RJv~>A4y7+$I5F6Os@GnCLIp-A$`ChICXCnUju|0XhIQsD?< z=9a3*Fw!faW_5ElXon%iU9OJ#yV1Tt>hV6le^Y11@bLpp&^SvS++_aO{O>P5{jK@$ zzWXyaTD%Iq%ay-MzQHy48-Kpf#idIJ)=^NNX|aDENAikZl6lW5cWD1iwb?Fk)q^fX znFvV~Z}%lFXxV7&>h2@`F~0DpX(4i)&nK8XJ|GwsFQ;JWyqx08y;VXXX5&8axfAx~ zr%`iYg+dVFs?>o@_7(m^pj7DEgdz}F!_UMKzohqk5%DdjJ-RLax)NQrfQw{x;hO$f zMKiGl)v6Sc57rzYcN&VA_|vO&ZoqVcAVd&)Ly8$!EqB3|ixt6zpuU7py zDiKS(90nfmQzP!{jJPFElR?B=x1SJxNvH2a(p=_LO)7$kT>zC}Kt~zKa8T-jh`m&g zf=&#iRAT{qEnW}FykhborPX*B_aV&}VvbdBfdj;`Fo`zX04~y7X7g8Ie@WtHpAv3b zVafq@P<}DZaZ265&!!~X#S&Jv;I1^lK@F*4hEANy*A}V`SOciNySY6>xe|2Z{j*0H-kr}^B$^P}- zUk6LH(LxxbrLzAy*@O6sU^cBph@*+?h~;|f)i69tE!6Xl2?czMl@Xr?B3x8PfBIp1 znlTiHN$ZAY*7EV=(it59Qo#U0T~ttGk+hqKB%QHPe2{{!W`>k3?%v8uEMv^|ivK38 zHT4ED0kA$GT~XUq?a@ma{6Kt#R8CEPPmtv)^zJ_Xcv^((AyKgmonp6V=oCT?vttK6 zUM`47;f};Bj36k2Rys`!eXm>3ojlu~@0wmd%3sIY*EeXlc zAg)L?J}dNTvJaA`SiRl%G-xPsIVB3RJsZ2N4Zzg@4fA?jw0@_-R%;J8+{@g^wC!N%zw5eU#olUFLz61wXN-WJ7jPPMGQtk^^R!U9|=DAumrU%Cq~$b) z2%QC}+7T|RmSyZ?w@y8r5P^UeN2&y8km10mbQGJRf*ItXyl7|lwdLeGCTg4$S*^_# z-vVmHj!)!cc(vX4Cu35`{<=n`hU>Fc6m4!0^^x%BjzUqN@|Ng4LNHGGHZ@4dZtN=d1cH19_HbB{o4z+_+ zcWW%ejL=H1rHw$@d1jG*`sH_hUhCThWw?u9uYgYMf0h%kBe^#(T5HsbQ+#Qmgdd2* zUP@6YYcjfWg1u2&-}K*&lLC@nlm!$Sfd4r+8JHF}1SJYCH4BxMrHU1|`eQowm`+k1 z#gHJ_>(9zat)}|wtJTy&G{{bhEf2C$1n~pMhLsng06VBTmgiwny4xbj)KbT<_AuTh z=SoPHClu{WG4?cJy<4TBm?mnzt$r5x*oGkq;rjW1z57qz7k(~L82|igdUjfDz^9Lb z&ES#0tg#q8b6+xop0wmEEQihJZq_RLMX8z4Na@=5>umZSnT8;b-77f_%Zk5zH_$6Q zMEb_7MG)bY1W|=FUJRi*SohP^g$!V?(YMr@ob046NRJmc$a_wOwKoRIRif^t)6A}d z{b`3vJjV-ym^kbHxi#r6Mcte7ED<`Wlz7R?EHzp)3)?(|2(=p;0r-Tz^Z6#~X9=F0 zJ|i_Rg^1Q%cORcUdlssYQp}(e3Aw9+R-}pwYqx6oI$8^P1V71Mvxvd$d@EpqU)~%O zl6=Q`7&^dmQZMM%UP1W4T8Nj%igY{P2SKQHYE8h371V!6oNkl~@vtO%uW=aCnY^W{ zxn}gJRVuC}!M$cT$@ZRSp|laZC9Wc^hbcF5dXn|ZKivX?VU3e0(z&o33RDRdmH^e zP0`#}W5rn0(+5lOOfp?9iT!PJ~QLaPa^8JV6uW{ghom8 zA+6$lVaQK4pi5ju1x!+>-+kN}?#EvQu72VlUe(+T0GA2zv|^L)VL+GrLEkyY39;{{ ztq9z#2?FA{wtnAM1E{i0h&wsHy~r$rGRyY`)#X#K?_1595>BB?5!tC~7v*_pWbuMulg4fMWbt zsr_l*qqmpw2r|;KGeio2lVVf$&@rylXok7&g?GkUHP@p!*wi2pbuB&kbo;QB#FarF 
z%8C~rga8=OYSYX;AbS9D{fkzXUW2O~Ye{e+)QEgj2$G4TyN|#7Z8Hr1ZSkn7C`7`< zn;$-YkPQa#4uys(Zdq66z5}x47%*^xQukf#t!1DTwqU!sCOJ+z{eC_<6OqNy`g zom_DfxCNViuf5-Ou@cWdFQ&kaS{OR&HX7xKcia|?fs`*)b7x!MgUd3(Q7xs-!a#x4 zhmJGy>o>Pn`f*}6@{MEVz375olMK@fnD)Y`?jGjBaTARyasAX_@d_9Qfz2KmA|+(B4iNFSDc0@0bAI z{?2Zv6JOjK)l`d+vZ*_OB62cb+$s+Nb90lS)<;hsnJO(ZBfm)vjY`IV1(#cRuKE?T z5#8eXaIEDTly2NzMp&5!6q{5Nl~p2yi-dhCdyOLAv+~{^WomvAr~#}iQ|P1yktj9v zUltcqb?o1wOKp$|N`SZDl#r=%pO~Q;NeU?ripoVNV@foWzejiv9OyeyAR@x#G2Evdy&4i z`mSU_9jTW1^zdYu4dro>w}J+%KQimlA*r;nTE+}(y@`BK{jd6i?sjm&PWP)gnPY}P z73oN(2mBF&EcHOk_hb6RaysPPO$5xIqrwjpF5W>g0?^Sl*4o)#u>`@4Ls^KtvCJ`( zD&{XPe?LLq9VcL9@##Wr0>oRa3GZdul2ry=Q}y)<4t58fL zmZJ65_?i{R#+RO@zeE3c)~>4)M{+;t*_U8SB0{LR`PsZT!=$=^H%dpwF3`S6t7>D@ z5=|CnRkBzx>e?1>r2#6Hgr%cKN0^0{FpS5N6dTc_BKL&Mr4NIz%=HQBCMU$k$JLPl z34(+ZZ^RqdkkW;+8iP0W8LCtZVv;m4OTz2-y`7=YHi zaVA(JK&6T_x#Ym;sy@d#xRvw-D<)&Von2hV)&;hCfj$?7gKc0O~g$Pr7hi z9JLPM2`{WQ%dKgl)gg`8GVpVsQ>fjf2qLKmS-cx(am^IH#zz_ZlrM3d$cPx<_j=RL zAi|qTlf=DnxQmOtk1NG@j0w}QIz!)=9+g5>s)(wiBI|>IZGpI8y{KF&eH+Tw$R4FX zdwEjA_5|fXs2r74=lxLW5|RGH-~Oq|`f8lr^$(_bo#`P~$1Sw3ekBy7(4Waa8&Op`m?AY%R2$@O;qBb_)!RQ z;I3oBd|5lIj4oaWd>`_UE?7Z-RR5${_yA!+-@{BPMUexSyd zSbgjf5w*VQA38TVs{_R_Yqwk{tU*9Z=Sv4pd~Z@JHAMYh_9qJ{Qn2+h@X0WFPa*fT z$H5)vfygt_Cad+n*VU+Q6PhkSH)Y0B2Q)W^Lz(*}k6L4>vIOhZAdYu?b6{zGF1MgW z?$H6Ng;1%zg)QSo0DsXup`fSMmHLwy_of(nI9Ea}S05k)TPdP}0d>_uUIl`P85~C? zrguPPCboPun>AeYPK5`)fxA2X-CnoYUF&Cn*)kqShUtQQH?x=X+^-0FC7T@qG`2{^ z{}|qi(fun8Yiy>cY1hNG-ulWv@9DNATT@kWH7v0tI*--vaF~IN+^I@|JR-Lz_#R+p z#tJ*QBPEmZ@(Q2d!>0C5CJ5yB@x?x7fEi_7nO{?IZCpaJq(jgO^O9WIUw6eKu)JD& z47Cl`adkUt`3q^yDpIqj^hLNy$_q$CKQPImAk{m{iiFe%jyxLf$Lf{Jh~*NLd0$K> zAQ~J(sSFob5v|RGf_++x+x$MM&&k9-K=WhdLsp}e%gb#}6H>zP?7FnZr1~tAo{t|r zvk>8FxZ7#%a)BA2le{I%w*1iBT{ej@OBE$z8U7s#MX?j0G;ws>q`1Z|iChvLhy+5YrCW*O(PN>8wCTK%n` z`r-@Qi>528Yp2^?_1zI-mQ4I%u>-JJ&eXLlSOOs|I7}iI=xFoxr(XnD+dFH)!!U*J>zG>SMYX9n%ZE zHaOy9(j&!>Hs+>bw$e5_QIEz& zEOpMBFo<=jm#MN{@e&>>gQbvKpFm}pyzt_XhJI$EVtabFix!=$E1wm)Nn`k3Uui5+ z3ZrBEq=L>TzJ(x~7Bhl)C*{|@r&OKCIV(F zE5X!|hl8B)Gu3FZ8HoOjL_*0y5NYN$fdYnzp@P?#+2!GW@d@XBeMKnf(Pin*brG~ztMhKaBTG2={S5${GVE>P zUWkLN%|IS;e~ zDI4qzzqmI-*dI>g5^)~th69tECCRZOg~&;W_k)m}!nC+RJwS6M4!jd{q)P<^RmIFjM+&s^vFj(v3z;>(1m0e z4+SetaW6-$`nmY@a7+X3pL9hQZ69@N#3mL$~RT|iM@oQrW zSbU;X!k1{+Hi@??S5_5yuK6jbJDruCrICY1ySk-f-aLa}#n|UKePW$lrc$vCdB{i5 zRI?I-hxdSlwgXYO!GJ^7L-VxrALeLg~g}E{5+b4 z8jI|uBj{;MPNz-!V4=WbRmC|2!<)!L*UB8Rz&);~s^q|cDQpljNn{tdK=&Del z>86Z&Z|GG;bK>vsbGwTNy^Z55Yb03|AQTl~K8aC!MX-07oZTmb!pIF1OXU+zWGV{8 zn1@&gx%6S7^73s7Dr+>3qkfMcKl`szQhxuN|9loQnHXtg;88Fsf*~nG!BELYWG4S>5C&dy)}m7G<(8c+(>BfCZ6pPD;qR$+Q_r%v2$l z^tuYNV}+%^#9U2EKFksO(@ADzA1pmE0ai4GsnklUt=m%*?6pL_@A#7k3#Qa$A3H+Y z%^$~%Q92?HP|4x6AXKeVqHkY+`tw*gmnr@56;w#8XNKB70R?jPZ9`Tu>Xk8wH|=1f zmeW|pIRvj9R|*Y73;Uv)nmj5sY8^5_-yB-rZG#KF1{!Hx&p0E z+XF`ogN8otxT{5+s9LmSD>3sCVK7!O8f$G0w=5Z{36ss>)5JSJ1YUemHvN)G-tu!z zx&G#QQmCVVw7*YN*H&Xuco7LT{(u`JIT>{2ir2h!pK5XBfeB3@5dX_7m&j&FX9-@u zU}y6uPYIVdMmmVtX2~jpS_E7=nfM*sjCQ9mvfKh32GzG!&<{|AtBf}Nadsy_s-6>D zG)dGx*HjoNdZ+My>~~xJY~mQ?Y7>@CZAp8s-?Qz9-NdUh^3dC4@V(Z}F4bWM+9<#N6o2W3FY<5c?J?O!6uSd~gCN+6RC+iw6m3 z`fXi=I(~&^Nnag0vHu;TAVjpgBHpsKX`L2)UED*j{OaZ|$zU@D@RD zqOpzmZuQjrW+D>pECtnKlOA)j6O4U31HbyAVD$A zo`vJn;Zo=!r~7P1Hne&MRca#{dPUDl@#oL$rga{iknIR&Oaz_HZBKF#s-5|V!}iK z1Bjoq?@1buA3yRtLBo*M2`a3Pu1^`l7&}$ll;+0zX)|+$&BuDU^pny_rgMEuiAEf8 zf3Ujyau834-N*%&?D;CJr0!P!z$(SUnFCb=8Lt$}c@Ev<4i%^b!YDhZXad z5Yo=N4}F;!Sxv%0g2bP$PE+?ieNaNT^ad;gs+8s{79=B>-t9Dw@dtkxy8{&Yvq^rRqV>blJPcJ?&X>%_lnhmH4TLZ7SRz1z(T@sH zt;He)c1jW^V9&?YIis8P zsKD`5zglLN-Pd{|d@^!@K+l&GKvS9I9EPOdP`t6&_QG`Vn3K5hC`8Qh_^QTDcP8VK 
zmSspwkyofhO#HlU2ZZe$W+PKuV6nX7L6Pvwm^{7)GwqM&D7yXm^svp5jT^Z_e67T_ zsMm5S1tNpvD%024MAEj$sAfgq;*=4|n~4c}^z4BTgKB+6$4HM@4}s`V^vKK(Dwc|32gpNeRW_oR-~DivwXMq~?W;?6g_^EWzW(_)a+f@fH~}hsOlkH0|!YkGt6PYn-0Xd;g?wv8ruhMAUBS?qxjJKfN<3yg95$^Edn+&DlzbKO6XxW(g zJl^eySg#4Odro18uN1f*ow`{F2=`O?@q!DTos@Re`T z@=D1=?cvhQ4VrAFf~E`xwCiv|U;HHgBr0+4PBMYkAPPet7mmdPX6JN=c-&A5pa8vbP*3jq+?XPZ(C zqfmM)8vR|TO`#c<@v44bJ4Kefughs ze=>~>qb+s7GMQ9p?FCU+i^k1#>SUUk4czU+V(Bo$`i44_lHXP@OjcWheXm9Y(W-B) zmcF@JKxUaDLT}A=v${My3Yrl6*$&QsqJ`gWHc)I1! z@iSt176&b)$I?f%dY#O5Oh$#&EFgB--Rg&YfdT^CfXt7Z0m5bRk@tuACAl&=PlhJZ zhONZd>sTzrS2`pXOWaE*dv5)MvJsb5UHJ|P8cPsbmRE3R{KV&^h4V+W987-YgPFCJ z092}+VG#HLX7vMBAa4%+IXKQ_)iz!@($ht5_f6~XPdO;`t#5r1grt`T3eW^$x*lE- zOz!pf2n(pR%pamvzBtEOF*McOOr(d;9;EXjFj5L*4d*Mj0^hY2YI$c9BK3tc7+O@^ zN$@n|BJ~*;gvH>6(ql|Hrdw~HR^Y^84@g{yEOCaYn`SP=TRVgW2&@H3NQ=^tG#IAY zHt|hd1UjP|sEZ*I55X5x*eHHff5fvIPLVT5ki;*!oH(dKLq1H)IO_)w9zHO!m9kcW zYYKPH!cwxocaM`d8Q^H#m};q#q5_!irM*yY!+To66{wbD=jC9-k7H0IX4$w{o2WgB za`cU#u{uR=q!<~~fW?N)Y^HXZmGM>e^M;4GwLxtnL&Fjl6l_PqQp*7)7{j=L^a2wE`fw4zYBtYdJ2Ud@VcKIRrJmOQI_3&;dZ^JfpQGLDNY z_SH*kU34Vf+MsbtEY-BvB8n!ceC9Z83WC=N+OYNHG5e58DksV;=i?k21z6$Pq7M)O zMIS{w;?%vN6&Sag9EF+l#TTaWK51n{j}I4$vr{h*S%*lVTGhW&S94CrcE!uBPA^uw zhgVkhsM=`pf;hFEVL`C;#<#=6Qg<~xATCL4v0g2{eU-W4&wq9I58mwPzLTbR!_Mbt zzg`f>)vv;DN(vko_pcK4Go}VET#!Zhc%4hxYu_d z|M0iW6hrbFa?8MCJ5|$H3>Jk6|XR+;Ud0FXYobx@y?0HGQB-`dvRgG``NEoR_T)5%!|$1c7nct zij1Xf**zkU?iG`Y0I}uAE-o$lj~FO(aPO5)sS03NmqIutPp7a3;1k)V!SJ(@-)Km` zfoO$)SnY57e6b>1Tij}hGa)>ar+@_Vrrnod1!?Hd7Oxa)Qo;F*M|XP57TmKt-C|vZ z$2X+q=!c(FqB6r__BE{Uc+t|srQn5S@aU%1`7P8a-VeQzFAH6lhi~H%)3Xo3Yt*x$ zHR2)E-`psqWQu5B1+sb$)d_lziIm(fv$MvPDzqMtoha{=VW1Tv82+s4r8SAf3{D|{ z#|jd*G$#>o$nd#NyIjuecdFJX&t;Sts@;5stvk$#Yq+pmQ$-{ z0wP`*pm-mUNqHn*#{lh6`2EGyflhy!!hJZiU-|P_>DmnWETpu@|H{`UA;x-?YH)PgmJiNb7!xVtaY^dW|TxG+7L zvN0-A@U`3AHT_0TG6iAQr5(=#eFO{5*p!hhW& z{9iwPJ!NiEAt|x;yv+Brl476#x;E+z$Q-2wDjx0^U7(%M83>yuc2IAw^^?vbKlHYt<3tG0n0TjM>R)kO0|xIJ|;e> zLgF+85&YseKcoR&x~X^^o^L`tnz9~$v^i0kU1qLL9x=n#(rFW%#BIUo5E}#_M0k6d z!GGixjm?8cz~}rdPGXkkFSa6+U1fTE8)RiA|7s?L3vvu2#=EdkE^abq++4WgGt--= z{B$t+P>k0TO1y>3$}k)*wIVevARdnUPA_9GeSJOt(tyW>*@1ZBcv1*zBEFbhPu}Ld zW>QDGI(Bk9X9NIhm%J{Ubvz!Ci~C!VaAG%Gn>!fUh zB7lsYVJ3{Yb>mXmKyvwHSSNm|6^BARvCYSi(uesVaH)AkSXUHE4Vfs&@1<K z80T0%Qm#JI?)&?4xX|&x`)wb?=3Am}aX`ai2>~cJrY~B00{X*{H7pG~cUYKsMIt5^ zN9y+ck{$q+%W1uGk@zyTC=1$ba54^Sf=IwB4eGRJ1vTn9g9KajtNA-Rj%En}5^bzu z=W9OZg(5np4`ggNu-ZmoTycVm%o(XQT9NTj7N3Ol!nQAoHMr_ZZ4ZK}u$b!51ky{O zUxK)=+_1i zGfKa`kN4n@Z33&0vrq`%D;2;N)-D8Ge3&kRCOv!90)_(!s!2*7giv;@1zQ-`3}H#r zA#7rc9DGAu38HrP8wk?xtOOb4`6Fn|5tn3T9pRf{@l&<*6{_cv%ael6R!#~rZ(1xo zEA?Ik+)*_tB?tvi_Vb1h_V)g>kK9vPol8v)j#3|RTMLsB&H)8wegH^Ccoj1;w)fr7 zKfn9=-7meiz1>nXOsL?E`Q*OH=6)22g%IhoR(OmgNFlpAEx6O8(UjQVoP45}0-5W= z&(aP)dQ}w*p%Y9EoGt#dztQn(Pd74lax(ldS1a2gLX{cZ$8kUGxdW`%-JdH}-iT5)+R0l-4d+~QQWH-~%(T3ni(R|Vx$|m_ENHI<3 zHJj1q9I@*Ua1?TR?yi&gMm5fTkcu4WRccS-E?dq~H0Ps7asB>Ut z;5rutBrL7VL{x|Lc-_L&d|iu;`+%s7KhmAZ%Vq7^;+V8fnQpXsKYTPEqi~!gYEX~bNT#9sCnh*# zNMdV!c@VGb@aob{vd;mcWYVjL1W0k{lzL}$USue9ZV2=S4dh#`Zw$Po3t{C8M`{R( zbV8ie;Ccnn=DG;IR+6IRT?1*s3$LX?#$>~c<0~EvspO05j zU>yIs*5{<^LEkAtXf!EMLm5l-{m}`}uQnYxsPy_3M+;u(CV&@Z*=xPzYs@Q5Ri6wFoBNiPaG zUe7KPi#ckbEDOL*k?6NAmIi^MNmF z_3o=}l}VdFFqE;+{4=Nz<{Jgu9W&p#UWIGwib`?W+K`}A3GJ`$uEaMv>^TA`B7zo3 zNz2tfag`yUz|7eybdhxGP4UCb_VqG;{H2XA8{1ZnGH`Hvi;U$;+tlVS^9QixdmDPWJN<6#xyWsj%Y-+A0&9c?tl74S zn-P+ZUjXH$NfoA(@6DjI?gh!L$~II(rkv&NSbDM;M8okt)BBAdE zXZ6#_5@kO7%AWNmO-;R6RWlZsL8aH+{#fnZM~Gb(A;t1`tR5(SF@DR%VKVMwHV41SsEog~NJp~FgGZ$=hE7hcPvWD5 zXUkQf093i4h(W$cGi_y~7t7!}0#3FbVPb?6Uqpc%{F$q>PMx}qD 
z(3x|J2De1~RBpG-5zGuzW(+o_CV^0pB|sxHD-{F27QWSekoo#D_$bES=|z4-^OQf{sx^ve~z3Y zezj6yGT9{32tG>?bqcEnp*v^eawA&+4H#d!rLDfdg+^nCvlLYP1Q!!sEYCP$==3*= z9RSBN8>&Q&?mLOmg2*RE$b>A8nc-!x>=nznROGGDTmbLC*zDv^U0t~U_&qP+ka%+cSmm_%A-mG(*hQ6ArhyTC^MQ~ zdSA5dxF|QUKYAMrlclQ;%drxGUNwoeI+x-6{cruw@A!+_QZ@Y%Glw(*Ng4Q|cGQ^x zxe>c@okSM*tc8`tIME^^k#BkR^8C2D>BL;|n-r^eywY4{+|w3+YK& zRL9GzwGYSY5}I5=N-R&EJjq4j(`|$T!gp@UG&M09sV2XZpNJzXOwT1-r7NH z{HMx6eGr(XAOH3NP)<{u{{MI=@iUf=8bKtcj8cnW7|g(@CJ52nQX@nhTh|nmw$4eV zL-VY;j!BJ%I8o!iq~7Phxck*V{!bRc17iE);PNJ_pJRDC2<(#r$OTqCZqAJ#A(b{} zG4Z8cOd8ZuaeoNu5GiWzPtc4fuk87R!H`uyfC}hB{8hECRYD?!(Bi@~UmDB;;h_+A zu>hGfz&!T7#94K}kz6W{YI4pWAt%!Y5c%rUeiBKMh5@i_%+Oe}pvuJ3Sx}8&8Q& zEO2;Q>wK66mknr9;HC#*YshBuH|*VT8czY;lm#CWTVTbK>bz85<{T%I2jvF&ap*OL zRpJ_8%zL%~w>qoqzWtMUBMIFyGlJ%=Y9s=g;_oXPonHJmB@Bs$+Ih9Py4eW^TYniG z3B+V-m^+PR(wC;iIa{Z6rw)3!H^vqJFpC0Kcwb!3=St|9H|Pp=LRzQ>V7V;B7!LCW zqho>uI;gTiLI6-u^`}0QeIg8YnYct(w5KD*7`GQ@M289J62Czmsx=iARu&Zak=%Ms zA&-kd=MujJIl>HaKS0h8)qcgNpnMCCEQzaq0zKAzn>%Z}J^Fub?(W3ixS8X;6)LsQ zA6Ugjsf}*-eZzScPmoqA*%ep`AVq!38BocUw(!fj1_MIJmqG*Omyn)jfTy&94;m~! zdFYc<@_><;H73R6UUM&^RuvW|lEBBu)0)Cu?W|;yt|M37uie2=G`;Z(QJj9ZniNwx z=x4XwH2u!vE2xN5waD{Vwvd!8C$_R~@vL&{IWzNZH2p4UC$%0Wafx;+F@k5AW(Gk3 zgmxl1Jp8G`3+F%fkCA}V2;602!KF&5o4Nf4Ogvayag{>bFi$2FeVkOHFu zHfinK8WGGwaPlldxROxQZ+3|N|NOJpKN!IfEYVE$6>=cxtjr1_0Z(NZaEJY6D%g++ z^~(gCQ@?ZE{g?famtU2RdO~gz%q|ybXH-ks*C&S`aB?yQyGnOQ!eS-Ix-mK)6)m># zWFaIrR$fuY%%VXU`aC#9%v7i0Hv#+*1Hs#tZ)YMb0}qOCVJkP?9g?Ci^ZqzxrFuu< z>Q8Gjh~kdhGW-c&fu8x_u=3g%9f)Ug!oez0D83>&C=y{saD%Rbd*gl{ofk86fky6e zZirsm0Tk<+g3Ds6Q@=x=1OxKSD*o^m1pKTOb5%_HEWFUYE678<{=;bBm+gXB4PHG` z;wm*JWEb~@>-GSPhb1S zSHVad@Btd9uo-dn|75J@$NtN*mR|6`TX1wc zqhMB`$z+$=P5gr3A?xQiUPqrPTUkj6mt}I`$)noZh%pZFkY>YUi6_cK<>N$YL2+H5`XmP32uc7WxDV1tYC*$u+!bbl`@uJ zmg7>Qklp$*xE{%zw9l_a>fzr8eTm^U(G}Z5#VjS%g2XPJkG)Ck3J4;J?YD!!JyI6Q zwB91ORz@Q4?gQOfA)n<>tdWt+ORyn`v)|F9f(K7#d{(?E1&jB~g`<+bmdrk%Qi70N zwlOv9b11SotwFi}rN7^^$3CnnqLNk3qdh1H$w~+EuHbR59736w{&H^ zfuXWDC!=vDI1qnCsZ3+}?zO|vYIk5_b+$V}(a(Q&_p2bh!h%H@0jv{5F8;bdMXc0B z`sGBSbaU+p4@kbeSGi5#t5t0JLqw-B3N2GG38%%mYes(UhPy(_2jOihuN2V^Pal%% zD|Df#6xaOhv~aUv@O5O8Y8B*O7g!coiiP^@uu>Iq_NtFKYFURt3&0J7!Cv31+9KTJ zKdxw6QINa!<_~fcfGiK`b76~~+EP7J2HY9!1)bs*Tg;ZV^GFX~NtKe|B=myoQXAyc z(aT2uYbDmwhT%frXTH5f%HC1)t}Uf!++KIbG`S&=haB@J33}9IoNAU%lUc|XP~P@F zXuyvLg}%HW2u3ak-bExKlhXIy>}C)MQWxkR!5(<91W%tldghmGjI*<(q}XLK(4S+} zgqZIS%UW?OUU`F*tf_JgHam?blKmYT)zlsyQ z`$(K($ppoA(de!CXpb^5EAnG{9$*}D9zLNBm=GEj)?V%sqv?7j#1l$;tNC^eeI2jm z1Le@A1DRR|l)m89k|U}}YuEEu&w_M*mYyU8?B#aXTLjFJ37-3Y@;;s)4-?72jT>ZR z0^-@F?zZLIORb;=6(}T*W_PL??dD289`=5maX)OfHYA=49<=@Zr+0s2!uLoBL5xtm zM=5?l;CM`E%tERg=+QaF7jTx-M-Ihojh5I+%yPf z3&>xjA@;|=8_$oCL1qL5y&5oQpg=r_E~t$wiL;!jDl1J0jHYR6Y^efF`1G(eV_AHV zWi#WAm!<3uMB?=c10YYzg(cO`c~rxdJkW8rs$HI%+<3$Dj6~s)r5w)FmtU5~`bgRn zd;{Gu6BHK(Wx&0@a)lp;76SF^Gxay{64pjgLo}h zh0q(Au{aLOC^Vs>+`C^$kVmMCXXVV ze%~IEy$W!{csZ@jI?y&GAcrB{_{rZL+(PVPt@S~7!x}yv9)_P)ykas2DU-$|^UZe1 z!sZTGN}=UuZHo#k;>M|5A-9b|U~_Pj%R^Vh`6B|YbY3jxEkPhLH{+aW|2QoL*^pv3 zA0z%gJjRrZAC}o}(NX9Mch87pTQb#YwJou^P+qYP%9r9l-`G9e zo)n*Juk)2J>G24}C4iNJcw1apbSzBGrKaaJ8dN+WGz(l>=Ian~HYz%l=8HuFexa?2cPj7; z!tN+916`PY?LTAC1Jx&5ACF@I66&US|Hn!O>l41O>+mcEfd0iS!&ja`An|zYzYk9-Tolh2Jc5h?sGj zjKs#v`B6fOCu}{2d_X+m{rd90li^~2t<|+=p!e10)>gICnb3Y2m+bnWq-iN4(H!lx zSX3fVOpO()dJnRqnQCQqbHG&c@l!`lY@n3iex>|L>IW9dSp-2oP-Y(lAzDWxKqqGi z!nG+`jYZ26R+$xjk-@i4XD!}M4GP%pqFg#FD3{pf*6wQX2Ap=D+FhzKy?d@E`;D(G zJuJZSmfIA_(N9@;vgq?;I>z*Pu2^S!U7W#JrI??F6(!3dx+rDqhNYiEm4OwBjSX+zKb1rzn=A;jzNco;{9} z!Ym;#;wyPhJ|cs280)EypFDbMYQ6H%Bs$h`tAorStG(fEiUB?TgV8#Ejj~uN?0c!* 
z&u&psW}CUrF2$;QJ|Ve|O0@u0^WNd{90K9^)>TSc6o@-?0C3&9lnM799j`{ZA! z@4#FW!i6P&S3RgISQjxht${+W(MVA2xTRu*Jgi|}+nMJm}G*XTCgd=n;!zDLrA8Z)XX>fh5#|i{1#_=S}aAj??aFvr(Pl!{$gfe6(1U z41)iZaxJqJZry2-tQ*L9au`4mEsANc;m4pB*`a0BkbhIqPg z%vHMtS=aIuehO2=aU1-z44;}H2kC}2>X9w@6~Dgwr9a;$;uf`qd0K658qaz?V|s!F zL4k{4-S|L9>KRzOQ+Cewr^5X3e{mwD6Q7hexMQG-&D-|he>^Vbgv-Q2gRKaP{HSy@(uF`7D1oWoT__&mOtoU-jQQ!YjlXFcKhRo}jVf)KEIZ zV&b=spI#+03o|{Qh^V+mKE+q1gY%4l18Or@hlt|1&z&u`y@q> zDe-Vj+)frLET5(A!0FO@(NEd!t#*4MX(k653dgya6cf6CqZY~3Y54A}BW-F}dp4)G z4i|le!AWeAX1j476!ydQ@w)MGNe6&0M6U77_$=N?Z+G1ryk@`rITnCL-)c^MFUS+| z{hGDGgYwgsNAumHP!qF!S<+Ww^PnH5j^RBJO?tQvOIVSpKNnPE4$p$-%={XG&$BW= zPC?{YJgIzjbS)Xcg(pw^$LrArVyfI^O=X6$gZRhIliUoF?>OBK6l=vxL z04%{tV=&o!q4OTJS20&?OIe5v%`5KZKP)`W;3pW;yy`-i)(|`jL)y89qo4&(nxAn+ zLko9GUj^OnE7<}xqr)&^_OeoaMc-sEJJCq3V*x*Bz{a$1XCXL~d z<6OS~vnfH?BCGLUK%P~jgVa+KVtyxg2gZ+AaB;Y*KJe?--JB_p{=g3PuOZl0Q3PhS zBO+YIYCdcYoJ{=AFtr}Q5aKmVc=2N|#~(^|Wqn$^30yO@xICDM6)u^{v_pz`F0_VS zF)88!H=I!OoYLK6x+L?w{0y@B6@$lIoSrw<*~c>UXQ>?k7MYt-tt8i?rVk)I5~Uht zcthy`g9f?RH6{*YOM<94;>A0kf{K`rc9RoO@BjF?8X(6~+ZZgP$5%nf`J{OcXX|aO z4?hspEbgxg;HQP1n3sq2i1*xmeEvL+i!}S+*7;k9q{~F%Tu5I{`Y$pfrVHj53p7tA z(7&L_>;N4e)EHb%-Q;xX>7v2+QI{F4E z*tO=OXK^Riat1re0*HZQi3hhOPD91t`2QyD-I^oalJmU(U`5!zJXlcx3P3du1p=r>W6N}eIN>WtDC8^IcTI9;$eFP%tNCmN?M>s=jZnT)Fc2W@g?^ux|+1SdT7t4U&Pi5>%l?b)*J;WK<82 z4{y>qp9O`^Klqr?2{f5Qx(D3|(iE@`whJ@n)3kXAx5kM) zps%br;0$J4xU-jfi($H_r$;7c&h(Lx#P%tRe>*6RBPu=Po9|b+hEb^Y%vZZt(eb0qy+8f z@z^PBP@c5ms1$r?rqg9F24FD^lrfUe|NEz(2bs7g zXwnigSp$Mrn_KpGd)JoOfLm z-ZJb}uGO=}GNE~khFOORVSMIdO%1lIm!NlZ?ehh@Z{ZF%Vmr_GO_K%AD}BM?N3M4p z9#vJ|<>V6BaY=>D`kQ4#5egvf1#GK7?)5<{h>N4%Lwg71fiAf>t@OAeiDvkX3u$P< z2~4Y%;VoW7v5ETA{kbp|NMy%c<;c)QCKzV<^`4IeGXt2E8EEsa_8G%+fNLt^ZH4Pc zyuh3_>$p?nKIkk(`o-w6#baR*{2i8#byE!$eVr zgb@}#Qsvtk&zO3Qf@JcSm_OFC%#@w%twjf6@4Y*GpE7YKKK$r|i7KGb*8UY3RpbXq zq@X6L>{izthrGJkS@Zfe_L$YCq)^8P`_0&_z>KTEt5WJ%8&xIPe(~H1v95S0j!v8I z6^Fa@ymW0<@0;rrb0V@DUtOZI=EyiwSA|Zdr(W``rn#DeTf-0Ilvnf#O+|745mas& zDDuWw&oL8vFOD6i(h7!cU<+5PM0+UPA>1=Jbd7vk1_00Ck2pj16HU8mP{$aN^u5IQ ztg|Jt{pmr;(U~h|4Yq`PFdb)JsM(Lxoa6!Y+aQD>GTmwXJtQU702Ud;BWy7`399Ky z9ySZ6)+N3aZ3Io}WzcSfxeESva&SOzBL^);U=TSdS=#;@p}bSghViQLDTbMGSbgn3 z05z$d&WBtINaDYDs60%&2s0OV%s&$xr3c$|KRA$iX(i&!S#ZC6G2ZI#0~jwM##J;{ zHcCV*sXe0pt7DJG1{6yr$kr?{R6ZJP<+oEsmyAz1n2Ms@#}#VFpPlLR^x)Co$syqT^IHV6x;y?BJr78O6yh|%ugtH;mCJ+9KD%a2yGwp4PEYOouNCROc56L8@~xqlxcL2*E&l7I0qrK!0^ zu`t{?u5dd7-x8{K(cqDzIZ=zBykb-AK zM;H|ptNow#ovmK13{+d%f=8o*cWWqbX&LM)<@Im5n#FF&ssU5m`i15r}wyg?vI`r{|9rz3y`o%q0)Gb+A#J64QTc zm?U`i zX&~=*Pb26s0OD?OG~dbxEVokGqsoDOirwX|#zPU4ba{)FJ%6?kglY3xEkI;c7z_u+S75>1MvZv8-PQDYlS>3kD!J*;zf21;@t zhqxQiA#`6|WC2wl7hRz0JleDPg?I~9cO0EDB(13R(Fa7yWo3RU9DoP-0XvMW$QWIB z^L1$&2!;b!qA0!B+c}|(NuY(+WK(P1lSK9KqBN1A1HirthUSE~A}i*9hJ_qWGQOZd zWa4Yy4Ku{tnod{if0?j#(c7;yZARQ48|=Y0*zjhH83Z|5RxPFhu9X)*dH&c+5&99; zR#eX`MauCNe+E#}#i4X#n*S?1Wm+~Flj+UBcxZm``49i`5C8xCpI6ZtUtup7rhKs& zSLx*LFvIFhl|K8g9$rqv&+~c4^6kU}C3TNb@Edm3!=Okg?ghcmAS}p$QAxOWz<89T z-t^*}wsmOBSYR7t8{ADGkeFpF;Y8Nh4U@Bs&-u_?H{PVOev)yqyT^1Z@$~}-o>n{l z1br7Q=FUb(Hv%um8n9Xo<-w{-t^l(&YiAxe^XZf4@pgo)Fl8nS(Z0!15vD7&7+^$G zK1|8O!_kKdIquU06>Bn{-rMT?akNsO<1QGQ5iap|+CfVa#ol)YulqhT^e98e=VFu1 zi?q?j;z3#ZW!HDa^NP(EN#|*4u&o<0@2^lfM-DJzcyCJ_>1*V}#pH6Lv{_JDHs`7j z0h3L+nxe*WGfoljSxmO_rpTP$%7z-|_`Y^$){w7TSMYQVwNFJtfg2N2f+r|I0z~7_ zQ^lr~VF_K3a-m03oW{cACngYWBxrbK62s)*?o~<;1e*1vUCYDpw&T1mRYmCYq6cW< zN8B&mB&Tj*yRomt^g=Kxwnio^&Ku7sclmm6ueUu2D&MlF%n!?4Bzhh@!di@vq2@OG zvA9{VyRb5nAHT}9J|a%W>VuL5a)=1|CpbM{iNSKfbR3%lFr$zc^97Kbgg4{+vm)tO ztOO>H#T_sVm_#!+?-{WkcIR0R{N-PASRfH1kD|l{l 
z#P;;sHBs1Ybt^BY$0?ETKoa|XUEn%dmD4-Et20q>X8=;xFlmQVujeYF%w0dabS4vm z(G&0|Zp3guWe(efU5ZpEwn)jKG;DnOdidTqRro0KNU~VscnB{2+5KOcp>U8|H%uSU z*0{t@$T-GdNFx~0*rit{==tVm?ryJ=vX~|9hjGE<-)Rx7M0RKUMc$JH(04K z(Xki-gwIusTN`YHAy8dS?Zh;*3zEButW1kppv51gxS9DsLFJX+o6!8X`zFZcNzsxz z*JBWbbB>3M#TtU0wd5#LY8E%N2sxgsve_SApISMRg`;eP^Dkg*+DQLA7BpCAvUx1| z`BIv~YZw|nBs$Ypt6_V5klGEu0o;*=q@u-!RX%`K(re(#A^avs7L_nCkzV8!cduj! z7)DTHgR(Q3`D|X0x-30+7@rsnOt>1RP0 zboiuXp(W;sWt-@q|IMerFYB3&!VE&LE@#v7=g*&b4Rp(ICVA0Gs>h<8p+yJ^v$3V+ z4(}$ubfZSArt`t%`h1ctZr%?37Zpg)Ij)yvZd2}(=1LjVsz7|o=;LX|FzJjiJv%Lo zxl3mWN*YSIXO+o;;}Xvn;ITFR%)bI-z2FvkZ-jS2A7ug3qaQyGXJFopeQ4BAC6g_ag%6^~s}m!&KYv zNX%tGWR;Lp+=i9jme;Z}HrRK7U@8Oax*D>AoQtLH6X;5~>aYEGX4H4Yck;p$@kn#A z>2B_6d{o(zA~=OYB4bb}qbO0Z8R`0i6+!B2tI})DL%038ah;=kP=yS`DmGJJu7q!G ze{>Ul)j~Fb4k*p#0r{QKw_h40D7CP(;MKG+A+5#=AAxX6<%?ZIf;1kzytDJlmtmDn z8-H?0*H@n*uwvQB{)KGBIq1Ej30F6@Z8(XP?B@;blLpI}OI&(CYXy{YX?JotN@;r~ zd{9x$Ah9mvpot8=SPIjo^)&eFHZ|c-;=u&epwpbKA-9SB`!LD6PtPwL6Ty<^Fo)=; z`5-l>;l<=Op3DuS@Dp;G?pL$Uz%>q`k*U*XrE{PqA{~P!_aBMnXu|}hOth`n{WyM6C+-+t92Qo5O#V!!N8fZd zU-$gb8Qz1Y2Sr4p=mk(f702^hHBJH+IMQ@Ks3mnLV%jH~KAJa7YPLi?wEPD*w^$2k zBiJwAgt@lSAH+{za}j+|6HV`>eR)v{fpt{td&>%`9&T14gV_sWVVs3d%!7|MzmSBkMR{ENsVaF&hy^FINvv4 zkWeAkAFJFXwCKMnLSo_EdONbK0>PsSPknFlW`hEdba8SV4SQV*q46O0XV8W@kxpHA zDTE)jvQzJpsp;rdxWfxm)&I0`5Skg)Jb$OCM8H{r3~{(L$0)gIPU|jJ2j4#plG7-~ zH+2U4%SXr;sd%I6>&XDMC{}C|Zvm9B|ITzv$^1hri7vEcU^~&k)B!{qXM5 zc|nX8d0OHlFKGr#Y9*m*>fl=2QWH&R!;pjCYRwVVr5)wX4sIDnEFjvTz>N)(dg?^W=re7PD}BI{XDA$#PF%x%PY zTGQOv2eM|EBW6_)o7STQ3W21;4+NTMSRLGRTniP%Dz&{k`Z~7u`Hw#R$KXv_0yR?H zX)*jF!!Xs~$1PSjI&vU2FT6KK=ok07r@?Z0bM_d&A@HRi&{u(?VT_Wpgdy|X#(1Ef zyT#Q&lv^4#Q1twG5uRUb&aNj&anMm^MXwu{#T*xKm}D{FLzfjAcr(M`qL}=pA=OLB z)mq8g*=MQ#gbXN%!-V)Oc(8CB9I>X{bI7!+P4jzZYnvjTP7y_#~f^9F@|TB>)h&-o zxDo)%d69a%hzf6RZ8$2pAZih0kX#9(k*Ck=f$jx?cGOc!H`ekG)k}EVqN2~G&gRa) z@`YhX^6L@%GpLV0ubv=TUO}k1lwG&crd8X1I8l>u&L>`fi#w$$esNLBtKOyLz8h!P zk_E>#JBROhMr;Tm6T+7##uMcyf+DsvvL3|r9N-aDDhLzWZNt*@7CLk>QHOk4b z%hNk?lsSTH?T)CFw^CnWoE&IC#CRSTZs1}`AJf-inlbA&7ZhU2jNxm)Lv~0K@cyGv zOTht{*OP>nk#9Y7;Bhv9OGwX#R}0b*;>iNxS)U7TSrK<#klw@0-n_-TiaWfp@YolG z`Fp~+lSkq3g2R<>Rdm$+0rB~}Bmd*;{%l5qIY6or2VQ_7@9MTrS$_o-JQi^=%IF@R z#$~adiH~W_iplmC&5=f$xE$n(uM&I(BzXNa;o66JH})r$YXVURunG|QqYlIADkl3F9Xz) zzrdJ+(aWu6IsxhPyZB)8WGalfM6+-zbr9cY#4^;l`zH^88HFJ5upXY=(DT8d%R670 zx3?(2A7k4whuH`CQuF1Kp%U)>fXxrM}=IpyACudl8$0wsBTmSw$;2owyQm)o~;JEX8}I zp1EW`?p0%7K`6%2foyJeX>qb<1*tsKiU3cNi!rAN9wLZ`_70#$4fZI%cPWfex!1?K ztfm9Bgii(B@~bF|z%VYrkXgIB*oVp!^75+=^t#ll1*^n|xoG3lxOG$rK$Q5?I^rr# zT}_L?TP?;(m-tW`(lSbGZyH*2$6+7`QS4H(WmgZtf6s?I94ouXJNzNO_KrdyI+pk( z`sd$5e{U0#bb}W7dvM8gLYQB;{yyJhtMfEPDkPgFP1eOyz4w z_0ftSAEd*2J*grg*3*Dk8NTHS>CzD~J085DD!K9oNPaCynJROGIB-T!r zTzw8MiY>}4WLA?wVfeh)S@Cn<470g{`s;_Bz?Ebjng;a$FxB2OGN4!qvBQfR7Kc&@w=H(qsa>VYh{GK1EpgKleIy-9Bo&j6rGt(RW zE@ooQ)w)WrPYSIMSPRx}&``N7%*o?#@%v?wkFV}B?l@2@Yy_8=O!nGR8Jm#qle74z zUvNyon&^wK_?P)#>~Z!*N;Xo}fCL?va|Pxmi_y<`m}*5{+c&NQ6NPp> z%yquY1Ol(PzApCGlf}m-p#s3H*U#$W{EQMp_x$-IpUCCG*lPkni>H7M>M+(ay(xq4 zoc;;E0!fdcB7H9al*IZ^CP!WbG`nFyM*Qv9zVol*I0PZpTv+*Q{ya)-zFWf^J~Amb z2M9yFxOj7m(PxOB?lT$w1!2O>T^H(5jFG`fHt5}|^(lH6QkB3b0^x#zIOVc;$%$py zp(kkYFu4V(04Rt6P3A{H8~y#KAH}YZM^1M>#6J(U8(3(XVMIdhfc1yc|$WWeI(?AEDfa?IKqH3zmJgS^+g6AG=3}IjSQqUXDK7%Y9My7%uF3z z@>U*Vy+eaBFe!Z0YuOuKXZnbkYHC1ABN18YMB2L=iSQ}{XfCkHB$K@R z_aEO4Z;L6my0NyNP8D%Gm7bWXw+IzXZqq;D%_?HXmx_;UD4;4S;9abfMvHSGfVS7E zYr{2%M}pwzlWb!8ma!%kk+cQ$rj?!fQ7NDfym^jMw`Nx*qpW4oSjqrbBqtEW$ppGEE`Tcd~}2ujQ?mv>jwoVtSp zW?9Mp4^m=%HW7LK-$p%xI=cTc7xG*G>zBS_uyy=|cTJ^-m%d9Ea%u*q#Z?=g7ZKlW 
z;2x7qF#t?;GO^`W%6%l)g4-3Zk-h9HW26{xZ))$RXMPe_8JmX-P@6yZ=mSqoH%^lP zm9xJj8c|wStRyp(&(Qhhg2|W@20AE#A+C7q$yOkZ%3#gKelR1eZF?XSJs$~HST**g zY-xpHidrGZDmy$b1sZAmVUY)*cqv!3eCVsM>NjSmS#LjD5+p97Y0PnrKzGa#rXZli_z_okkBD3v6J#@b(EwnRH7_HG3)rKl!%{- zCt-#h+!BDbl>*Yv==Hk`3KnB=Dt*FZjYl$e=@)+FtMP68FukLLjd_%|;b;5I>L|7_ zv6grWsG~w(s#aJ`ybzafc(7BVpm47mCZ&gj)kx&RTcIR}08epmNyGx3CL|HGURr$Q z(-d&ii9qVcVh2pGJ}3cxbk>~pyaEP={ltSh7x-mmfBidjZCoYE@vGbstSVrVi31!% zV5Q2hq$`Lg$5L=L_WH5<=Z@}c8liP9bYYi7EBsnjXcVD5R)V$O##MT^$02NzOfb>= z7z>A9;vxBvh7XQUyhEw8iTE;xXF*F$&H+04L?At!SAaca6I7`l+d~7)4+fl@K#cfe zC~yV`_QoB#2$I$x;IzF zt3Rhxu3Ug1<^9Lsu7kO{x-9XkuA;KZg<1z8kN9HK2*J#)GK|b%tQiBpN-p@GA1f{B z&V~Ff%A*Cl3%L~aX%K0H3b1=P5#&s~<-vW9<#N!tT(Q0enu;GfgV%e#_<<1aC`0t> z_gA>MivXO&#aVc=V0OTI25T_qdSNiD%Pyuua5eGoax@(H5sc_(014$_*jFqCbF8+F znUXB^LAEw*T$$LeHa}-Mk{4`)>f&jHLwZ~OKJ_8$FItq^_u7B)JQB`w$IVIz$$97M zElgZfO^j`w>qs>@ng*@9=jl0dFECz$BvhFEOP`Qt!kG^uO~EGpk=xR-0y(&2y$L-@ zD~SgF-7bLQ8M8V2G1&(OF6^abmx;68%`z-lW%AB+V9smJ)M27u6!7)vgJSmJD!fLT znH5=m2~x_h>1?MN*!xf(^H!Z2VTh769Sjd9<_`7nBYIyiN+0{`ge)RnSR4~AH`Fx5 zQ1Em9^UCmbj(jV>Rt%ze)*JYpW)R9nT8(V1oqh&?RMeoRlg+6?+P1`tcCa^&%Q2Ar z2J%TXy+})9S2)7)4du-ai`aR&TVc7w^R)cCle7tNSB#!nSbXARN0*S46t~9WX$^9A z8bpG%XBNFTJA^5aLKzY#)P>Pl;wc2!&%KZ|)KJ^gZuUP>P=jz1!8E(0~L_rjCXLI5jXrGHFGDyAbwFaBWBL&U#C z@Io||RZ_~~w-7+Rfa(4;-tzp#i})w;5z7g{V#K{JWs=336<$Rexp@2dt|T@ef(qq) zI@H7Y#i&rAx>3El6Q1?+&RMEg5>`X-inlIWOZ2O8#wo6L_oLFglsVfewAXinWIOCb?go?QtPRiikpF*S@ylE za;UUx#Q89x7tw2ddSw=b9BL1U1^228*<$o8*Mw>{^Q5I{P*eYj5 z2{9`3z$<5`K$J2tM0Ns?H+^%GB7genXCX0GH-L!@%=}FL7>&N+7_1WaXhx8!FoMF9 zNo;yXz8yj^o)|zl4qUFtI{W>9^AM+a0Azgo&%W7&Cg^n66q>iP$NNgvU#9_e%prY} zr_&Ph+9NHIZt>FNXF*Lg&U!3bt%x!7j$o$J7^BzE#u)od-wg5yi8n1d{T@XzqjN7> zus38M(k)-Ru=FIp4VoTunz-9Q*uJ4vdTJgG*Tz8cC&p;`X04br-Xb_|Cuc`K5Kt%M z%hn_O)rv@}`CW#1E6XQ|lwQ%x?M$f6);So&rlh_~rBB2z&X_@{_<&41} z*9PCHEnG&kHA5DuD0AvV0JZr7R65}RC8*UZ{q>#Kn=2t+vG>O%3M~K^B=zdBFiN(J zE|19zp)YesUFILO)6||lpfy52Q{gW z4-0vzy0J{(Jz98HD9`>drwn0kHIQ`M2m&uImYBeyX1dO2R6uuMC%?%`*Uq#RzU&M zEX`_KwI$1_tznnkmiQ`$?x^M{Xh0@-O>v2-UiKCT zwAjVN490lzV#$9~Wb%4gQcPUSv3H{a`8x7kANMRG=6u|yGl8jdmu4PK8x|Mxg^Npm zBH0dXme}{>M^9eFzE_k>@(`lI-q*2T<^xYlvjJM^wdiNYDwwf{Wfae(_B$yB0$_K~ zXRBU_f;F19c+_wNDlWeL&HgC!*e&^NMBm!Oe-<(*Au`aEz0?yY^}A>S!%lRPEn!o6 zss~Ihh|L`zQqJ#3;&LAm?^ajm_=Nnh;-asW#udQ(Xq#&4?%{l2y*KyaTBp2u_UwtD zNRrB*!pIs1R92$T7^^Vuk3+|>vttU zs}^-+;?zelSY~;zSI&qM)z;A1`+TGB^GP5E1dlTWL9ZN*B=gHRT7aimh(tXOIzAyF zo`-Ol--jGgI?}!PNF5U1gof6Lk+ZB{dc5?cQF$+kTo(C7whKcFsJJ=q zNR8ZQouT6@_J?@-!x9CVP>eGlB3pX#C`{A6?7Rg#l8EwNs7%8yr3I8M`~cgkJsPGA zQ+8C0z|}EV?sm50q04JNAylJWN}tfI*96b>XB1{aO2iK2v|e1K`Xq}rEJ?wxUJnoR zP`~_SlBKlMo1^&sff@+;&C2<=Cb0e$FKXeQLUctN)1hV>D26H8JD7WYcoVz);+J9v zyag_&)-yi!>{)ibK>$G3SP?RJC0291S)JG=ehyw<-a`)cgIir+U6=FfebaYS1E ztW((A>=BS@z7Qw1hUcg=Kb>>GZtk#{%;H<5BGe|=>wPszKyhcEULDk`Rmj9X6@iD- zjCqpRN6W-=)m{29P3G2Z6a9*c^hr~GRDIxGRd-&ZljophJXi4$;xPa!{^tI#LaB6C_KLvSxftz&T!=pkq7=`> z%OJmA+zPG=jH=2CZ$VAa5m>3vpD!-?kHjmlPBG1Vz-;HCg`Mv_<+>msza`F?8Htft zThi;`b(Xa!yXH8TXeaHSXU)-qkx8_$g}D&-DFTsJal!P1Qfb_p8yvXnvZ8dLPYVT< zI#d_ZgJKb_fSGLGUXWX(q8~jEiV{)JWWfriq~&276<}qLD9(f%V+qohA*JlfO9BL3 z-}ht~gDcPkHTK*(&CSsW9e&Zkfz~&%KV)A?-~Zx z06!+~tS51Xjy9XyvylisqzSN}LhBWTg};3xEUwcbKvA$Gf~z(Loy|BFC5We4C(Dey zGdnLWLydydNK;!OY8WpAqEtarNm%>0GUGSOPE;e1lNGpvRL}U>c9}g?>re?J5aMi} zu9vs5-7iCtK9U@-um$jz8jlmU|{hu)L%mtmZf}b>tGGDHa>Cotp8XgoZ|UH8j3Yd;6LW-X*-?uWNkZ5ENU;kKZpnGx6r=g^2ZtK@_`J zY2S4^2QuDVbj6y7C0Ef#TV0)%M#Wv_9%F=HY-vkI9o@-{9s~P8X^cdutJz5Xdcawa zO1C?I9!|zwnVphv5rT4*w!l%QE)*38Tv{+Nf<^-Zv3^lnvbJU+-lEF3;@G|y_^;~4 zWp;ilFjO4c=l|{gFXG=P4DSh*L2BZFVkSuIkr%wYBsjN*r*x$XdMk2yn3)X47Vdw- 
z|NijPpM)^^S|KNITjf?o4w3I8@8zd`O(ZVXHO8}zr^v@GH(5;uQX~VUoddSuZ-n}P z{w!z+YXaeTX$I5Y{nzLQ_Oy z{X)}~h$5q?;kAi*1iFX9&3Imm>!zk(c%EA!Xp`7u)tfHFTV%{-9F7uFk1(Xc)bzM$ z)&ZJUs%=vEdgnkvOrag>y6J`-SAk7XnC4O+h5L)7XntBv_a0oow`f$Wj&@zaF4(zI zB|wmrJXZ0I-a_ws4G2wt^Nl*C1}}a2`RAYh^QZsh#R)c3nZ6HDhJej=k)LR#@o{3T zdvre!PFY+?n8j4nt2PbBP=_TAY2X{a()x7*Q6JI3jYel!B1c-49T!Q850^r92 zD}cWgTZ-*qYF#PJ@6Q0_AwPy|SfuOq9RXk(m|HI@Q;cM(SFU?9dSBEY>4W0%(yLm1 zdutE$!OmWY+utsAj*UN+VRAvBdQ}p&wYcspS#vQgwK{v4Fk@eo%V8~Ah3nf?kNG+2 zAhqidZB;n}nX+d|whsv^UL;?bC0)l|{01ZXN@JSmYFnz zX<6KvhnA-0)jeCdV4B@#FHcgRO4-aH%%E z@dJH$FS7P+eK07+U&?%{{YQNC{^Rfd+V94zzWnkx&2zv(2+NcYuIy}>xyYPh=FT_m zg=8666EHU(yVu(?)m0^Xldk61d6{30?ovJi{t>aUvu9PG^EctpM z@qXWyKvqc^VvSD=qy$q3xvQ&`UKJr+av%xzBM?v3MTy$r0#oI}rSdDLIK_29Daz(s z9fL3qo9s+hA#JYw9iI!1BP^yVdvVmHX1SL^LCmM}Ib7bN0sC{!W|oIzGcAr4!Cd;; zi1+<~0a4B3CTvc#BgZd&@!b%vB3m%YE0SNOM3iJ2`Ga{x0!*$#Tr4YJZe~hd10C>T zZ^wRKJbz?H6BU@JWOXQ8NoEyZls~oEupNc!Gzq~0jU__9!!p@;g&1#h1c6SCgXVyIcME%iWMcEi=LNy}vy4GjpuG!?o{)$&2f{%vX`p!@MWbj;dv{6J5}8tbBo*0>}JDFJKK zA<^T(d(=Jw9W%dd;p_cFNcb;*D?X;Fxc~Tr;FnAKaQQ0-B$pEGN(UAXq*WaXI>8aH z&Up&MGe$Yulwg9(#c`C>wz^g>>WzBRG>apafw48$0@_s$7-dX><(bPPTD3Q ze`#S%GwHdT%RPq+)!)TgSEMcht)T7h_=9auCO(cz+EYy$#(Bgts0z|e8DT1qN_GJ> z=nOnss7P~|Br5t{*DLsK9jV6c%BXK=bFdyHA!3PJ5GO-2HDxK*y3#UfW=`HSTrM6- zL{>v@iuURgjKx5U=k{B`$rlyz$-`Q2Ya0|O@X-a_{F5hdt@%s9Pi#$b>LVBu@qB~x}y>5~okCggC}6|DB>ct2fi z4WJ^LOK>F?BBWJb78F$}H{_1#a7+>k=cHqNScF&3CU?#CkLQ7YuQj&v87+G7g~_&- z*S+qoc6?6zgmxL8Y_&|7V?^{Xn2D@tiLjOw6wDUBPH)$szENq;dI3*6-pSQ${4Ei`&mdu>*YZzP@aia?YzSSz+LF$xH_(HMrB zRO!^cYh5JciHx7c0Sv;O;%CIa3CLueQL{TAre&GveI}Eaj&ln9h_`|%l&YfHSv(Ff zQc92BZYSQ4+8ox77k3Px4nCT5aduf~xmP{3gqicD8cN?(H18@GTj+$)z*isQjTGh$ zJ-d8(M$<@ht773-qZ37>-S9*02B3yxCX7zW62a*i_Wr}%S6}p9{8xo|GGxZpE#=uf z3l}IP3ITx{e%!exvg|*SC`<=|*?}bhH2|SQC#Db{NHY)_)gmE~P_p_Bi6BHv(Bx%? 
zl;G~>&k_JCkfUX=I&(hm%8*+Tr?X|r%^^-0HE&0!IZUnFXT0nEH24C%CZs7{5f~G7 zebTgo5A;@Y87i6E1Qw=6yGeYcQaes9BcN2;R(;z13m72HPPm~z+nlNwBw&OYZ!im?lGu^tA3vziSDAze?jXBtCypzVygK73!wyr6et(KQ$Mo!|h68)x2#}*TP+IjgK6vb=tHe85 zu%HAfF_pu_K_MsytMOTZiliXoXY7tvPhkFqS5*ebcH?<_yd9$>D66u}Njdq>_3?Zq zE5r-?;y-*#Sa55D%9t2qzomRJGDM~(7aPK-z+Zb=%Lm@TjO z;(4T;aOuZIimvL?+cJN#_#$>gaWl|nzexUHfgx?}U#9lN9Z1l+M8vS<2D*2{ls#1L z9^@?=;THH^6bKeSYXxy?zo0)Vk`|7`Z7SKO4bq8MMe^!Ih>_M&0YW%HRljYU#t+^Z zDd%xUwitnCYLm1fHBo~0pbX`q1w9E-NcxmUMC-3hbro}2KqXpi7^<;`aZ)TxK1fJ5 zBLLrc=$k@D#_sKOR$|!%Z4Wr+_>q9egyJ%@c4x2|+U;NdT6~HyF!Ao8m50KeQ@U-4 z^ZniKBxjbOJfY^*gwA#*3ngmjbgcI;7|5mSSCgfK+u?ZUi>1;I6HE2Dh|gIzd_JH& zk71tK;i!5`88X*$3|s+4V)ug~QSf}P6H*qx*$2M`4k3s^rQtl{c5S@BpT7CpXu@5| zn?h!ousUvB5Ow>dMH5{IVL_Wru-{o(Z7b26H$K-#gfN2Oc;vT*`pA%2BD80qR&o%{ z;4*vn5~nf_fY2(+Vh$n!#IQj}N)qvP%yy2a>#B~1mzHp9B8rGYVp_^0F~hp>h6RK{ z5T@sTIm{fI`kho>0fC6XPsW7Zj*k4Gv*B&a1;tJ@{2VcQllhtP--nHQb)hS%?uwn&9{>&TYBMl!TRXqat}NEsCw&w235m@_Ik;g9U*~=kf){u ziEbKVpd;9nAhO*~3m(ORX47x|u$DxI;IVR<$QOr27f^8L?xTV&uBF6mY1SVHV=Iw- zb?p}_g-c<8tiq}=DQJUD_-NdXjm=(=v~!~vcpV&p)Kq4s0AG~BEOFB{HwVEnZe0iK zWVf&n94%f;L3yio5Zre(9>tegivS_jTy^U_L#_BZoL5xE>HVLKt)O6oO z$Ba-_Yq3G!V&N0hHp@Q*zLn()0&_I1(VVl|aGj|DsSPY#M1Kz18L>Up>R{ja%YeKP zmicACmnmlC6VOn{>2QvV2(u+$AJK=#sYwR7gGJ0>|-M9yw*SQT%kb{mmB z%cT@*GnB?3k~7LFJT{ixhgmb0N(h+m=7M}$LS8DY%T#p0gM-Y0hd8w7%J!@K+On8yG%=aDzP_cSJRLiI$aBIU1(qi$< zRa|{^j*#>k zyStg5+!|w>_&5~1lM7tFO~XOF$Bg=d^NuI;&sl5k_yydE5jGb!QGpP^pETaN8sI-t z(vq%1tg4rr(+e5`QQnV!$FnHuxP1kxWAE_rZf`AUgapmxMxWGjhObwgZu4SWy6`H2 zAlSc;&8^7IR<(>0@;7k7)UMetUuJdwX>;V3J?iRJqyx!}pfdc{`cdGsTj9(a7-$x& z8Ihh!o}7dPoEAqD1x10nS9VAsXmVa!2(;M<8JXEYUh)O$aNlSUJ3x}EPq~4i9BlIa zA?a4jW0F4Smzf3S7zs7`QP@h&&oV8rz*D~-Pu}AS9+c+B#QfvS59Jpqt#~uNOY1Bv z28706r1@)CWS)A9WHiEQW2IN1hcYf(sKU!yCs-42mSG?*Us2ABj~4xBYE7zf~!jQh(kmFi!>8CVFG|fU@{3+I1!bmhTwX= zma1N|Q7Y1J?8%GPb+W6mv9lLn9Ui`me{S#fL*;I+1zTc#YmlvM4TJnSwAt*h88*@9 z(0QFcjDUa_qmnCm0Ij7+zyHWMVjsadDIhccitrz8a;us7V7R|u3cia3UyD*nI7I3^g@qWHJ47<{k|t^$(Jw4>d;7*Rz;L7MgN`wQSWZ>D(ICRR&(BOG#< z;CSsjNqP{TFK3IxR_AQJ9mIZ^k`>}>cW>v7-$_mt`Cn60=ug%7HOtual)f@WNQ}he zP)4iqmW8Fq@z3WAPeR5J+OOR8Y-ut6(m=FEb7F&FaK;~rrj8EXK&=I#izznfYzZqL zD+ERV_n&?o-_^KSPAUF~G1}Q4#7}f@CR-RE#f?C*WgLi*7yRv{<0+H9ET4GG&p!Ro zJVeVmNLBLboO$$Qj@OZVWVlKE)WsC3g`RLAH!mpL(k(&RWpZ9VC>DvBm|T6!a&P@B z3;I&saQr3isj7ntM-3>9ZM_2gq21vuFBtEor&}(UYq!&V6$C&|VD}r^^Qd;dJv=X= zmzSLRoV&EEe(6W0pY4SjH`WAW|L#1&O*kJlx2MkfALc@CWK;DMzv> z&X{BS@uxrjXF+8)HObjY-JPx7&7P06hh>YA?_UsYrC>QDu`eeXvj$_B38&8=Km9{| zhkGfYakb-jNdR(l;}fh8TLCunDfH={BNTWDhN}|U8w~Fl1{3F_h-aJ#LA}#43>1I6 z|M*+q@iqbG#}9)ML!m5K6!`+|d|nbKGdLTLVD3qo?^s})717|%!O2aNq(j=!V%DyV z@rPI>cGlLOf9M%(bFRA0+~f~G{nWCZ7303vMj{mdDg|PL zT9@>g_^$3kY=|@^WeSsO?ObzngZeEVN8kAUULtQ1H4#$0~H;R6<{(Jc)@H`_3;oB0!vX9eEMa zm^gQq%BAGmxTV9)J2k1)LQz$aDzS_Q?@mtV>pci<$0Gst4v!dSK};`>hl-)%=qSdQ8MqxkQi@NjHt@0t% zSR@)yAv9oF1|sPUIAa~s^YyTpV!L-}l5wUaHm(!+uu*%eNCww{=_~18d!(yGRl<`9|x?F(+Dks*3gZdkPb!MGwSz zuAUX>2qLwZIY?n7ZmEx7r5$gGEg>ZD|4(Zd&AdRg*Y5U8nY>TcLUqjWB(qC9mwD_f z>s}%F!_rftHEc}eI8{cpBdQcZQfh@g1)Go=h;wzB7Iy#E2 zqWiE6{nPs7JhPcZlIhqS0&W9fp-G}Bg&%qZN8t2@oKyIiX?gMKp1^V7eEb$<&Qd)0 zZRzYMc|6Wzpf?VYA%AOnQA-DUR~e8cFS+KAb!edI?e_t{!>(q5&JB}|=ogE6ii2T# zb#JeHMdb@FgKs=DgH!n>n{-l(Z}0(H8J4)rnVOpTP&qMRPBnS?c63%-5rQ1Ml6x8J zfopcQf<7qzyv)qxoZb|)j-VO`WSD#sx)3F~u=wb)`CM2@v2u|Wdn0W4!hf^z%Qc#P zI8+T%&C$Qb^l>bVPyn4)`<~KS!*F}TzzyzCQEgd6*M`@um9o(bdz;LC5y7F*;PD*}4 zwj~nZ`yu&B!*TZP^F0~AYque|qrs=rv=gHh#6rUX(hR&Pt38I$LSa8}jmCUxI1(%X z;x9GR($W(z25cpk;2gwAgDZ&+_%o_EY&tnexoflvp}29u#sG(@+I$u|#a}mHKJnXS 
za$*UKI&riz=mA_EWN1nKxZ;@q#Xt21;gVL?6%=cNKGSqNZ_0d&Q*18}aRSMsaR7Z+kN5<*wHc(H)fR7)=&mB~1o?p>w(AAS0pvQV0*;&`Rwf=x>F;K91! zpUL>#yQvgQEOVU-^4f20fWV=ODo%97fAP`RZO%&kM9&9sKVHEhE!)X<7Z;2K*u`mV zYV)-ZXREjEr|hEE<4?HpS0#iF2A9KQm`BE2H3lNrBW#76vu{<*|EzCU4FhlP6v?{{ z&&CRnpzPQCP*2WUY^4muZSg<27a%kR!Lw{3V?Vvji**0-*`sp0Is~j;A9r6;^ekPt z&;QH)4{bYcVUW+5HH3!ZYmhd6mTw!a}hwmJiEh#Swj-YB&g3sX)P zjE!AiS;%(s6a+sRW(^S*bLqf*Il1LB`^}eF?e6C+^cRWJVwiCd-0-+myg9?T&mNa6 zzNUuSXGI&VbTY7mQySj~zLvTGO{?Mdu%qp(fs98kkJ#AEup-^gZ-PFHi;qm&eTq0x z0JgWcP=%UikoTmOj{ika9!p&4BR@b=_80@RWP<4pN;`aJiJ56$89YxWtVU0<)_Gxd zs%dxyRM~GJhkI~HuZ$#*yz*S0@C8oK!wE#73SZE#(3GJ65y6|Q5|-HKsNZ85&V^4H z;-%o>pVJQy>K`>3K!XaKjFU%0k|6j#)1fW?rr+UlNu9f4|6GW#>+Q; zK0ZnHHG#U7JP}aZt6C2Ls<0d`ZkSqCsMCYFiF&Kq#9{v`yWWOW!s_5A9b`EA`J+s^ z{OWgq)obh0#v_3(+)d8QkNAqvk6YL@kV|mFKBL1dqFT_UMg1$sOD&deb%hY15mM@93k z?F@pT$O5I-CBkFFw79Mac~s~XZPuz#`t5Q0ZkwFABDSWG;wo3dMcZ*klyU>o%6OII zDxSSqG94IcILXS}jlCiYnhDHHyrWBe->VEUFFq+ka#n>+0?70l%u4j*RWx3lM{elK zRGreSviSO}?=iU5xUd{2T<*01$6{^1v-p*5*s+iM>zctFU%XRf&HmYurhE9|yPbYN zj)H#Sf9Dqwmz#sM5$h>8#o!<>1uFa$dOY?9nHn@DVR9VVE)NhrLTj)LXP|!?>*mMMBdrvUu z(mXzY=#u8=x8CKH7EDWNFyXRRTyNGS-KLPPBKdK;iY(L4$NefMr>L;F9SXQGe-iI0 zjawEo$(ROU0CZMRuN6vE%nioOe$jn9Bu?U-;nF#43;ENAs?=~`QxnC8Ambz(EiplB z!%VzQ7@ZYcV(#AEwIqAk5QISEpT`$vJSFw$e2P8}j!q~_PS_vownVByirr~QwlxLT zE=)cPkmW7hcwMI6T;oeB2;n2@&*lUa+vd{1mIkTF8%AoBNTGJx3J!$UgVU=-dY}c*vmDuAD-ul0nL4+<*MN--(?; z5+Dm@RrDvsCQxq7Vy%y_P@_h)7NXr`wNY0n1ZaU1e`$jW5TjDg16T!6c<%M{pa0{( zzW>XhXs@^86#-SeE>01EK4MmW@E-1PHE?%jwG7SrJ6H6wiWGq<6S4XF)xF6f(Gs(V zYzI(z{)I53bD4E=`bvt21V{Y5^vWK!|NLzNGa8laUm$|zDaEeeuJ2eWA^w?HQYWF! znr2chP;WmU;hCsIKUF3fwr^aEy~&*+#+^gunrj$(5BPYYIbD!B|C;3dbqO-`iE)KRdR0a*;gsVA1rhZfQjjUKUOX!yfNd@- zmXNQ@;OpI<>9E$@?(KCpts02QkU1-gTsX5Au`NQpc7X}QNa$D-kqYG)_3(uYwv~7{S~8~|{PW-O`v`8RXcfU#oD@8K_eO>d;CnMn z>zTvmOb+f~k!72me(~HDZ-JGCC=KSt20CyGvzb)0G#dYm^B`X^zAnzaijix8kWjW) zBy|G(DGyU-0X9QQG;n0WVrX@elf_;|=;Rw8*^x>@(=>*v2N^Vd;2ox*o!15g@#6@9p~9p|~S^LE?v?wo5YquGX& zCgT)TBpi;pa|274W1v0-{FSdzYtYFjGB*8qX`ytDqtggp8D?SefA{G~O1|a1EdJ@U zr66)^=H&~rBR~D7%tVjL2M{cXLF0zGTE$iz^4Bep@<(wpYGHVjH3?|7;r>Y?c5&{` zGRuVP&jAI^NDL+ekJOsHL4o7VDg>a2Dxa%>H}wm_d(WP|F!Qhf7gNL6sZ39qim4&^ zl%I@*ohxa&CKYl#Ii42wf@cuZHG0H&9vIdS!Q{u6i}h@*bWGX)CFrsWm)JN^gN-?G z^!o5lt$BioTuLU8`{@L92``x$jyTzqeR3ahn34u%y7>$*m-lg#wAi@hfrxZba+6WV z2RMLNyr=qcmznKSYg=L{%R?%uLwpduQI5$=RU2hT=joc+FpG}msW;yYFj3?miT8NI z^N0W91q8*(5tn8pYNesx1alScwCc)s`k>n7X8QqMf{-7A&vxF}ZrvgSTHMJMP&&T0 zfOwjio|H!8+*E$|P+#HWAaBolcxN03sL49b0VAEn6@i8LyGpLp3860`jlzh5$LWjF zYd@A1eDdp2#+&Kf4mY&OQwA#J)D<6+7622+kNg2F)`?q;W- z8eL%xJjUxZb-~DFWh@<0u5a#K6nQmatZz`e+uKbTapx+lhZwpMn{8m>1CyO%HN|8g zW_$t|3&69mSUSbq!YmkJKou(=l~}v6GlQmf&hDcGgCH?eAq|{`=g;DA3Y8Ub``NTvm`T)dI3 z1Mqnn<4~?Wj!`j;*+A2pC)ygH(u^_PQ~}Y@A)n*i!3&aK7(#5FMb@x5TW~`A$}>&m1qOxo`f`09U2t9Jq(hjuDSZ1LX1J zM~_1P42F3u_dosoXK~n{|IMer%zyv8Pd~K;+vO_B3eSl5HeF?*nn?ld5u*#Wk=6jw zAcf~j#CLkZ?*z1~#OET(T=tWC0wy3^T~`c1TNJGyy5(Hwn`FJ=X#5b#Qz3F|jr^F> z!f7c)o<-6K*ZM$kjwU4qb1`}V(1ff}*5x912u=~Jvic?r9jLL|9-NTg8LfD$t`G_k zlzKbO^2F8*y-2KfJH4+4g(l8YCkzcvMi0kfhV$=M=PCkJTa%G9rtAG5_dgBID$fex zPz1~_IifS7Os3Bo(d&ZrCL^UVX7z-SQ*OurFBWs|?Rsz9e9N#}%%;@qAPrQGT1*_3 zYgs0BcQ=AHFCkn!2P!HfRY$bFCvQ^$+F~9|W#mC+yPdN;W_+aygpvg?<9pUpGd#wW zr_ySC)+DvB1ww~FFvj`Z7aQ!_9J9p?WItvbno|lx!qXB3!wy8V7nWU~ zo{>+123#~>$4rro zQ>jq+Md|4B`NysVCV)ISP7iPyv7crz$*y5dYL~uim>M6vH^8qnd-l&bgC&!%iFVT> zV#z9@t4v+Sd^+4o=_X7a*5s>!i41Z`VZq=vtby`O8}o-;>`(VGT>xf+9rydl{~S+})Aq;T_%1W!Zm7H1=3my$g9FS$ZjSSOkgIZ)ja4F7!pbBagb=? 
z`SW-HV1&8Cpq*)m%Qc#K1ai>_(L9l>OmOWX{j0l!_(^R1s@D7+aP@t&TRFP?L_Z1r zMxwIVxe_@H2`}$H--k*;OS&cD>BaY_mw*t2>4=vvRP@bjm;qzaUtPh5i?3qoXpp15 z2Zdwqu-?kWq3pwX#!WS!F{=pyO9=S1Ja8ZI2)hlbyQYvxy!`V|?*I0_Bxv~~qdViH z#7Atk^o39?WljA9Ud1h9a8Ai{z9L*g5F|Ff`24x=4e=vH8ZrPbHX&C?ly?WQAQA?c zE3p8@mon1<{hI<0#(2E+JgZKn1waMG_io3e3WA&WG*IIqA|LBoT6|{KB*rDwsSe>+*5OjJq;)B@Fzs&!Uf@72KDeEPI>FOugCcLhhUzIL3OAX7Ym zPh@W5_*8DE_E>?HEtFTecKM?m+z;$^I1oOmxhs?BDS`5G@I zcAyru#eQjuR(ij*if?n9acSqc_T@N-3=;Mqj!IVG`@Gi_T6nVL1yaBJXYnUuisMOK z+~$D_mu(K~w&+$>V0(10H3*Q!_d6CDt8hdD1hKM=##bBy7UzP;tGmPLbwZOUXZibh zj0_~`6Gx>?c*raP^wGBvVNT%>FH01pd9oLpwqgnsfktha1bZYy<8_-<&d|&=7zQm8 zZ+`OVNus&vj>OfBeJSN zBrK5%!95sX9pGTrXzUhgTn3B8axhXmdub+Onm56kVjTEQt2WF~iK68wzO&1daqhc& zHk}UxQDso#fn*m6XS67m&!u6V8j`~oXP^ibVVVNMAure|s-Pp<5dsL}Q-AD@>~{8Y zJ|meKLR)b#3`;An6!B`++ovYS=IeIji3?9&m<@*ez(dM#^-ZN#d^?GkKcIOHK-Ff8{ErPM`LnynZ*?j2uNe(<>XavfD}1`_}d zuHYj=T^_H+i(EkkTIP6qXWN7-9&S%OM%pWlcbZpGj}u#2CiUpEL~A|t&Nb->HJ}#F z)wBV`dasFkMquQ2SBo?|6OkK|&SHKAC8GFkUUFIx6&*w1r@-!rNH=s!Tv)tyY~p&G zDMpLcxAfi*TdJ=F3<-GRG~*Zguu)F4HmO`|Z*!oVTFNLpXS`cQSwY0s2E(7H0FY?E z$%3=am2>=ubJY}zlA9-(I4ub%Iuv0V2Df5z`B_9rhz(a@?H)B;u~BtHR~3zfq4{%j za-0}81O=ifIIy?wC)4kV)|%Bt1slV!Vi}t?NudVS8Md7sPV%1ZU{CxPJv!+4Aqyhj zdipN@we{NJK->VNW`k}I&*Dhv4@WZ~{40&MO~pU}I;pcWJdF>_VpB9x&5T4n!O-xSU@(CUhTjyCIuBZ+qC^ude3vlCA#`vE2Q%pV zQR}L2@l9VPKnKV9?+14`lVXajW0?5U`>la!v6MI9V{A&iC|i}!Up$L}lA@<}|DU(tatwLbrUejL1 zi#$Up`x&G!gw?3eXkw{Yqz$4hSbkhGqD6oA>F04(jwfk84G>hVS&N^qhP!jb*m%?H z2QdbFueWyNsYHB%y~Ur9>J^Z*j_eX9tf&{P7NST&8R|!hJY0CRl+g@oR(CuzevNTA zNq@A(SMfsSaPtpDy}mI~Hz^gaDf4a<=y6D`YEv@EU${{XR~|W5;M1je4Q}jq8Yjxn z2}qJxQD|;;{wWcH)ltc!G`kq>c~<1-%c;b52?-DzLTx75xDm=UdDH#T zU9nKt5j@F06E%G;!bL@Y)Si`R9nvl)`^`Lm@jR#t`g?q)TUzWg`eG~TjlJWSzrjWf zHlRVkWm(q>O4At=48SinOzN@|%st+TtMPMuT4M@H^ zoDTriqO(DBDwpenCUPlLq(vzFo>!!N;+Z4WX?wjZ^bm2y0HVLl)&hTZRaU!Cm_MF^ zR=CJ^oC>_8kX67pO?$@9amX58YO}BDeRL@!DZQ-oY=qE2_AP%Kx@|ZfTMM+} z9q4Cz;)oql9a?=@;6@-{t3^lWpbz8XSz$KU5a3|28GD}%Av9Uu4nLT@WKr(s&INoG zO$$sGnht^RK(>}F(A&;l&W<3Vjk#6rsllKd(qVO4;ey22zBP_U2>NQ*fMHBsM?bHP zHz7ZEvpW1cU;a*P1?^Mw#|uxh_x0AOcynLFK*{R}_RZZmV3-M$Cj)UfJlt_&-zJYH zC88-!j)x(;=+_``n@OAsezo&`UtaVE7EUaeS65^hdndvoTiYMlpO$J0R0hd3hwi{e zsn}X$iOfBx7YGP0QcxTqX+oSeWCn0ZJRYmSQN5+51wW(>0ze>m-hGOhxn;5$t?aXs8l?QjqR-T;tTvlMMpox!P1Lo{v3UwOOSj8=Bw@n{T?!n@=L$< zMJx_zRKoSt(kkqrR{1iUt}qu%*%rDc-bpM3IcKhYj~*R;@Yxd80l{`dxu>H=5c+r& zO?Tp}guaGyablb5{|}qQB9+bcQyVWIXpCBXoHn@?W`X`59oB3MhCWlDm2!%e6XXFR z7SA6NgEdbdkq{Op#ZOrGvgj~FZ7=8+`=L?TbE>(9Vk8pX%S0xBWbpyfhqO;VQX0`rFBsSQr=R}8H>A}J;sD%a!1Rkh_`UCb5y!|5Z$|IqCClS744Ui< zD%nrE)@Tylo^< zo5l7iDll+?@}A=vcm%R6($HsHXRPvFLvjxZ3;36LSmgZxWe? 
z(Oeun4Rum()<;5)gG^ApO5ZBQboE-dp6M9SxVTLnG*NL={AB7f685JcQK`yGURAuG8%vuv=+*ckB z^ARa*ENeT%8V&*p?^Jr@ZIT|G68lPqYw?@nUSmQ61}dR4W#DE1l`5t~>R@kVX%mfb zP0SonU0ErA^?0bk_TD2g9iqn-O10Y0lM`B~WrZw(8z>15Jjj(}?N>9MXibGnDq)3v z6WbKbwZuRG)|{q60}m7#p*qJ5kL=35vxHDb0NR#S9eDO_Smvu&ui~X7Bx}%^-W^23 za{V;6bPw|TkKg?wzV?cvE2q6q_|Al7f>`z5K2U3AA~XAh)>-i!@&q2lK{Vy6gj+5N zx17w8zq`8^%(`|}h8V7+SCRGS#hU~D1cJ@*tS`R&9Ul}^%t3I@CzGfNW-+E?LxZUl z%Dn6b$I%6Ld8uwRSQ;4hi!RM+Da->ex zxnxO$Y;Q;il-Xg#R>&bxVR5k4Opb{6M_~U9BcS3M9LrEXTtG2-}|1o zp_n)dFxSo-(@I--z7$(J84gn&gSgA}?qW#hA4p3V))}CmVg=lLr7B~Wh>77iY^0U9 z-rw8SX}}-btnDblFxI3yXFaQeiI0!z(v}Pk8;stY>JR-S;}dneh^0KE-@fOY(U{9OKqEc6}WN#1b&}$Cv5k;u$HW7)-n9Ha1%fm-9}x zb_Vg7rj>r){`W6qcT`XlRxMWa!_?H=(8hz+=nWD+L4X>Oy2k~ByBX3at!g^0Am(Fv zx5g-Ulvte${k?4cP~wa7J%c)o1l>GQ={ct@o=Hz5RWQ|s}3tM1ppe9lP|SX5(Q$l9+#Mf%ckbsgZM zHvnzB(l+1@WF8s#hB~ShLy|BLz{SG(??>f_&5eYnc8)N`<^WI1^5w-MZvxOLQBIqv z_#hK^Zl@ot*V;S?;2FQrIA~bos=yXIU|cPs2bB7ub<<*40mBlqfkb6$2Tm1Baa!zA z`Ul@xpdp<3JJzJij{W#u%x(&uie0-iy-_dV;2#2A6ipvyau92Vo)>Y9gpY{Hdp(`6 z{Fl!^xc`~|@*Dp&4*Pe1H8wx%-bA;vU`~u|MM>jF&WpH6dQBXFmLJb#=0!2;P((`X znd2_d+ePs@$j505oZUNU;>uy zo)K9rjSq+^G}Tm%BT(!lzEX+H8~J_jyG866iBrO#xC|$#5a~f*w^yUQmOC`xVqc+9 z7@HNl0fhB8pZ?Y)5R`#W7V#y6o0|P1d>Wi>GCoX_2)9=YjQdK-5bN`ryWC2sT~zy@ zefs-YgS^sOecihHo%;}uIK(4qHmCKdlk}PoP8jUP!{b7H{kK7%Jn83yDB-5x+kyD9LX|iTJ;mGLdzdf${T6h4|>ez_ACHS%3 z#_>CvmNqDiOco!~n-P&5tRsJ}^B#xnArcY091y;WOQ$3n69XOO@x2rGNF+S3ZSS6@ie&2s3h&(1Gti-Id`~8x}-^)sPAs$LrY?b$Epr zEGzcFKO#ZK*>GBDgra!Vek0fnRN%s6b4+(KEfGP6_b@%Y#uMp)Sab!rw>ECv%(RC+ zi=y?#MY7D=#m3U6mvoG9Tm)yA5)f~`RmB$WJXR5kSrO@&+UOeI7d|A;0AZXlfConR z!mgMl#B*ZXns{C&%1{DBz!t8KRo;KZT$<#^Pe*q(??L*To6R=-e69pV3u&45@JzTw zI34Fj|A1V`A)LSTrHRyn$13FT0mC89FZR*UMXP9yeRdAQJ@!LmyvHRvb;%6}f?coW zNgD(ognp@5Ek=P~_%*fU*tjWw042{O4MuY{h=M*>AoGmFF{)1K!LE(=;gyxqQ<<7v+2QplJ z`XHZYrAp;jvuXvmP{ph%a6OVXU?_qjyELPXe{(-Fm54w{e&jwhkfH28OBQ0ut^0w2 z``~@W3?)f|0h%a(z89l&vyBK?^opU;Wh3H+XFa8d7V(1rVD*HtY+ z;O0yiY$Im4_tFtw9snJ6kBH5;=n0}Gepb(35-8;hYN0}XL2w8cx0EAHf&LPOz*UVE zVeV~qLM|%IfB(Z^j)x>H7c_pOoF}nD(#C#2VG2+-@i^MVxt{ z`ykN~thrF@Z^7`Xd8y4{V4@6|loee9v~O|m_*A@739q?IZHC(vJ=wdUeT-tFv0&!S z{ipk1cw0{wvOVjD6jH;-g`R?T|>`h5v@Wk{wVapRroQl&YOebJ0_&bkZ-D1h<#k%NnRa%MSeK7!?-Uue3 zs#w*C2NlPXy}nnI_wkH(lzzm&cQIkSDRei6fq(9!TI(3L&Lx$Mh?aQli8O<0+kd^g zyR%nvW|Ew@2&1$@zIKU^`mZL;1>1NYxXLEx4EC*>K06{&??vwFMrLqwhylT+%mQ@q zzV;{ge;r?=X~$)u1Ex16Og;^S0{NNi z=(ERnfB2At{c-#E-^L37SckA7sdLMGpm@XmM|yb153Ns#EfsYaMja!0;scs4^(+`` z!2DG|mAb%5Qj7S|>8SW-;1Tt#_$H-m8Rd|!d_T<`4pd(bD${IM z;J1_aJx!jy|M(?Yo|{|1ZZat>VWW+@ZP@ z+z&!`_j<89SWK~myPN%ul`wwJ zOYnxBs^edcQ1M0fUM@dPMDVH1Po%w2(phn!aCXohc*ITdFg1Hw1zRPfv-<#5h1D;l1Yw?*feG{tJ!=LM%^4pm9o}05=jW}k} z3r|`OP-UK+VRHBsFh)v~6*cTP?TMnluh9}rI*g=ot^}1pq9$?aTQ99<0fn%edXN-@ z;wyO*Z|SR3A{{TKjf(k+i9#4Gm&rA`EZ&U#UK)%#)V9&-*G--gL?x8rw-IdjwCmxm-!&r{eB@sZ z58EIaSCOM1hwLpQ=?Gf|xZ=v~A7(6hcOEZy#f)|8()?(E4&-&_0DA}yHFHLm`>+lu zVPKENVC#Z-J-f?j&MQ)r!@Tp)mX`iWc3|&|A!@6vAmTC+7q3L&a}Ky_5azF0RCHU@ z=edjlvCDFGJDP5m?69e;6GucM?QC+HK(&`DuJPs+IlSe>LgBnsr{N+{2yBmRZNqZ0 zsMk%X_hb^-|V=&{(o{9{vmdU08S)^83wN(CKnGP5?sd-&lPL_BcyF*LdrB z8_)DjAs(HzPykiNFvZw4JT$gPI{PBy5Gq*5|BnL!{?rbyVC%s+C|4G>yEusLS?=tYUyqPwUfFt=!Yk7^Tr z=&xY(*bR@8uy;m|@D8kAZaF>EMl{&;*TK!?$rB#ANzI7+`{$+{nKMjr!Klp!=( z-H)34rU03nQJ4Y_WIi(6l-)vDRmdSO$ei*bcQnjAD9H?_Ze-iW?psp@JQOhpR%xoN zp$CKOI}8$Xz5snkMnrWhSA{=Xo~hLC4B%2Lm+{#gbpDX!PRh39);g)g^Y_W_P^go7 zPFE;I1R?sx%=aHPEOM-p!n_Q56II1XO~3DWcp2Avdz2&1kH$w8=5TpUKosPI$5GNM z{4{Nw!TLtuY+in4p2P#j z3&iM-4fJV~KorHRj*1IQ5~88E#l~EE@hJF)7E7LbM>iJ~nmu=1-V03?Mo8ff#lFPl zhk55`iKh(vlAbJuTp0EA$oy6?oPcPZ^}IqP^1ARvmtJjC@+NMLjm||jmqQ>xQ!cv_ 
z4T%8ivfxuAfbL8?^)BVrYlqV1PUQewp%U5W2yf6(wLS|GMJ!N$x4^1yAvP=wub8U5 zoRg-O)sSEvggE%;^^Z{uKN&9X@Ez)HeEyJ6*ha7DXi#G7T_>yO#=ifUZ|6y*;|EP0VkES)T5?DG7M%UhtYK#DFbDja7M<7jn%h=gNM0MDLpOB$_8#3754pnxk`@U^zljh&{Wb=?Cc(5I&i zy{+S2U1r!0$72Gv72Jqy;nFYjpCY;gA_!hIt!}v9G)al{WQXIMVH}nMpkw{@5w1)e zqFweNvv2g;PO)RRf&j$G^}g{3Pfo(~%_OJU9;J5l(m^NMR>(V^Y1+S5Oki$xxJWZo zbH{OnU_FJ_g#oZV~VZHL;ITkNEQ;%{_=(%BtUA ze|zrI6+sA0lJoEgSRzIYL{cgV+17F%Mdz>|?88*~`WOWJ#?ts)x>H zP>>ki?kK4S{e0F&--EsB7v9piLJ#30--mJA`9cVphadan57-0((7(8kD~nxZzTU@x zr)aOYhDcw4e1S(?$MbbP9R`5gkr7sik`YQUIjaJ`xKp9%l^>b%1j0>0eYK)8&yPa2 z%x%jeGK$Px#Y|eD%$J~e>);<@vwNS9z$+YNH>ofAb2u5>a^-Sz(t-LkAlB=a2!eA> z{^>sTA11^RFY{)wqKPJ!?MypEQ^19~+;eD>2bN-byBeQ$5yj>dRtz5n+@GA@;sf-> zy6IRZX_OPFye3VVF}03iiN0f|2FksS?H^zjE;?A@H3{VB{bIGZHTEK?eU!Sil0r6; zEcHRCHfj|KZ-JB$-?mn;%h%ydK7d-{iuSf6zaWLKrhZ=c&+xC4m;M`WyU8Eig@2Hb911iwZ$sx>8dgl=r1 zBAj(-Mu}uXICbKLOx=WVSe2J?V#IM_yMtzb=@AnXd9`KP16qs2^x2^P69)g;^qCKr zRo;XN&BSQ30AbAICd1G_pcy^K=>-Vv0r1hkxN+M3M8PF$Jq*iIUax*6Ztz#r_y2Q! z_N#%0^vy{DTEIZEgnldrMztJ3f=q(U<9df#yx8V2=Mt??rw5YK5sV-sauNFkc>xY0 zXzY>9^jf$4p@l*Q2^unaAXL-B0~5Sagw<<;F>s04Qr{~EOzU(-j4lG?JzV5^IayO# z68$4Qw@{-`F*oZ(hcx)M2u&ROdHn#0JE5qGtrJA7FYc>vL4(uD1+AlWLGdo%5_3>JX3dE0bqy*%n=s$|Vo&~Pkjyl+9S zn&UkRF${0SJabQBy4L~16Abs_dQ`P~F^Zb>3uV4KfG`q9;A|K3tw8mlu@jlNFMq%N zBrZIRwM)|5VVrI8{LPuA=73vag-M~;l4kb(cizVgmP!}yS{@%^4PXD3LLFoueLsZy zo9;7#z6=W-yX~az=p%p(vd(#~+`XOxDZulq`>6RKuQj~`;_-MwC$v&^|tN+gQ?tiO=skue} zZ6K#Eqr0@?<)enXzKw=tCoSXeJe}^;lSc88KX)I>3MGFaRIxh_YamVd`-<^%rD2=g zVL0ZRoRXptj@Uhr7(XF_t0ZH+m^7Pf{09`UQPlGNBNs)CQ~ibd@Yo%56D@vFS^MiV zzm9;f$cW~DNEegFG2Ed0GHYWrOGjH|0Hwv@Q*p9CM4y8L5*f8@i0kUo`TCLd8J5y} z_SlLBdc+aPxfP+_=jFa*xNS@+4HkVuYi#cdVj`PdiS!?S{qC>$zrU>i#lKko!dLjE zI^TNRW3XrFGh%lfSor08(70c_O+d(2yXf@3pS5)*2jVKSM90)cO5d2%1=|NP=F*P+ zY@|;OLmUGA8`TvAuOigxja!>RJH

M**3jJJ=@tCkzSPUReRww^$#6f}$D6xnk6P z8487ZU(aq_0Dm2TbCaWb>P6pAxJzIX;gcgMa)}8r;634l5r0O$L3MG-=)8>$4*tMN z3FCJaaztGy<#~C5H{q4m33Sa>7QIMIVPX>ihx013XUr><$>7lIXl!3H>cRBHmrpe; zS;jgAf*(*|{X|=e1w0O=Zr2jx2Th*?T33gM!i6Apys!r>tTj1lTKXXc-JpKf^kD7C zh1k4auDr%cY>|~^Co8UGMl!ehc>|qwc>!teVqXn7{Ln8Y5ICo>YLSSkB{`4V=9(I@ zuz;n);s;0P;fZ3C=NJi!zt>C&>W^uA)K!|C7J_yW z!r7f3)i*Uwu&Zm)Uy!IpJ@g3dqJ2Q_3+5~y!#7?C%}wYCb%bGg=GSf2w~*Pfyq{f* zfll=c)r2@4^!G=v$tSkq+^*p1Hs;0h1B3{##17=UWt>5siSI-ym`MGL}j z`Jrz1!APssPb$M(l z)c3#=vR$xT{sqsK$q}*!{%Vjjy4m$M8fYdHq_$2;76j(a;kwjgsBBrL9nYV0?3r(n zsC9&FE=h~=@&t`B!N{})_o_LE92mc!b}dcqa^fFYMs776*S{mtd^NcJ(!Pp@8ECGe zOw1s_S>+%1ey}0Q{hL?#JXi<+Jd;6vT;)Qm1fuMz&;%E_mW~*pFMIt&5Ce7YgWQ^K z`GPc4_hptNJx3}lztwXJQK*qEAC2oXkuhQ&$r^XijOz|gl2t}f1V(w@y0FX;m=6yQ z6DDC1WkY(dVZFE`M3_DmmJRKB5{QDjVAN(E6mGP1W1ZuOi*ii$^Kg|ej>4G|06n=; ziB#ywCkfalOc%%qV?;tR^d5;3xS+)sA!-{LnCW3wTq+MVL$p~^*yW^9)w=6bH`ml!l&lMcOQz)k5^5J>agoO7_G#CFB+DQ z)7HkeT#Xac_dlKfY3&F?rgGTNAfI*NdYc=tX6*nW8BzZuLSsbXqZ$%WY>o~e(ZigS!uo{~AX*3k`6;vVG=FIRm{3YM%q7RQtUC?mJGjep15_ z+cXG=MJiT4OP(vemm%u0mRL3GoY~bNk@Kz6} z(-!lY>v5y6iEX_{s&rDM*}_RW8SXJvfRY;urEXNhK=x0)U^G|=V8jl4r&Rd0-$HW_ z5OWLG5K-`p_{`|PxHu%fa+ncVL^`^A0K4klM4LA5P&nIVUO8$2^2ljByEAwB?2dR~ zo6986j=F*LqbvndqWhyd0KxZVAN$M*bP~1<#KYEzf%`S6WOtx@_m;_@bs_xa=-f-D z4U{(Gjm&{}Ga7LPest@C zSFR%<1wX32ewz?7pCU1{I!5=cv!nGh_$-d+f#};`m&n`l5-GjE2y*)t*H=OI+99p- zV#b*&V^+No{GsgUvO62GcFWGW(Hod*M_|+qI8z}&cP}Geu3$H-M`QLk*zKaNv1Y({I8@qR#Z1id}DG)Ve<5O``n+v`Byg^d4LB z2SA=|OroIo3D2@cQ`9=U1nnZhLfK)@Zbzm^3Am9MR7Am<`v|(pJ+m{`%d2jE^YY8yW1QhVyxUm7lt>bQmb@n}#KR*y@Zfmq*Tr zVlS?vdzLg#WE8eU(ZT6VC?12<+Wti!)95V$BVWt3bhd+VqD1=DaQCnkkcbCZF(z9+$(XOkA!Ab#sSnEK+UpEXH z|C*wH;)!i!Z7(9N&pd({7Ven2w=mJ zIY&VOY=u7>AV#m${`Z~jX|H77Kb(I2ht`Im?vP482W*|-VadeH_W8Ck)V`J+|NdX8Wx|w;9O}%Z`~CYsR)5 z!G``X|JHYMe`{qi#_pIA3qa;M-(}zT+IwfkH`)YUSCpX1t)JEpKmYmg^Pkt+kXK;= z;=v-GZZcZXjIFp^8L>Us0M~#=4Y7HiC74^K2O&wtibw-4DDLzkC8sJaQ$#wTS}Ar z^c$cG1CADOV^6hyP?CFrzGOq=TZFb%)HH~-;2JrX+i@DCO z&GHG!r5ZuHniOGS=FQLQ2NCGfn^qUmI8mGrs-d#j^b%X_EOhA=BBo_W{S2->=P!(FfpSpW$$1(st@fAoAu8ZZUhXaD8%A@sMebRSMd#Q zX@@`~lmgX96oSa6?LL^U=`p*Mvs+^E4fX2#e@T^+f5QC;_Yx`I9sGTTHvRC%KI#b5 zArK;u-pl?+Yf}|hGZI6ZA2ZN7aY@&wl-LddW8EP_fR*WEpckr6CRVTn5lWuif`A+= z%wggOxh&u^8N0qGp#|m_C7Ii=>e*J772j2jNYo2NG87g(H(LhTd^%$xG zhJl|#Y*M(qhM#SH+C%@S*=a0At=|@F1-(zCpzG2ojVnq**n}DP6uJ!_RW$0CT@?Ub zOwWOmrM2y2eWSFpe-!cTEXz@^NFqYDh?614KU0)*J5eyAuIt6K;d!?b#j<8Q-VA1BF*rmfw|2YP%6rGT285{;(8wz55bBejh7n5>X&2p z0KKbqE0wcmnc_SXB(tKIYqA8$1Er>-Xqk<*?yRlbe-zoZnhr8F6I)^uyimZ$CKtk^ zMY6EFYRyQC_YrKr!OsyoGqpSz57LlfmmSgKwb=9(Gr8(CBvuW+ZYc7O?E>I5bVm!? 
z?tTcY#eJIe5cCvQcM8EA!XB+ZKLHIhC>ZTi-r`)d_ZTUNY%=|<@ubD;#vQqXtK)e{ zG4QciLIMzXzw1Tz+uxWY#9*N0PcFW-jjh{d>k6rRS;E#N*lujt2_4r~q{$^dx;3Wk z;4+jFCKEmFyz=%+%$2wZ*{F4q7)x(EC8}y8D6vgzj#%U&2Jvaayl8xC2*Cw1deS3s zmqRT1IjRQeaBUN-rYT^b!YkKbfrQWNiuR$|R}ol&tv z@Lv1o!Rluagq)3B7%vN@lah+00ZUOKoR1gw!vhAX?KQ#g6tVciRvoO(X07<{mlW}d zc8VjgM!TgTTytFz3Eo);Pqk@Wgr_2{9aVFC>mz!=bdO7`M-ijBxhTZyPe1x^_~_@K znmsj8+&kVP6GBkhoc(bnpS;z6h*a`n{D;`#z*=ZC0-nojI7OCk7{1(0cI!tSXfxe@ zlJ?5kG*G#c^zqPsSvJabx$*|XWNY}-WR0t$6<+WNM; z{uOLO^tBUCXAxCb-Zz8z`*5JLNnQ;v ze2qlbI^^=12~6SsA9*>eM1HvTfGX6hfK!U&<-mzT&4{JM!jET2O<=fk&5Z%lraV3|!Fe~AjxXx*%|NZACBAC&Q&iL0F3dfy}Wgo7>q%ff}*YH%5++oEAE| z@rrM3oXs*1EwbAn29`joezO1UF@0t#gfgo137!4zS%JKDGPoPsiqKk^fZ!nEXV zPPidYLzv486RWF$tjLwZ5jSz$7`~|At9ey0L<@2wwCm4;_=OgFJ((-9QXI=mZ@kkM zPZ`1t*g-fK3n*^1=%xj-z_n%w16DDn2e;>4DUepQQH~{JY?x{b8PsfE*%f!Sz`Ek< z{Y-IH`T34OvK%(EBy5Pnb&_%U+i#Um15Hw*J7Ag4g=K&+kM;g(3Qq# zl`7Z4IXBuwhsK)Mq8izHd=Rp%}sMOK8!un|(6is0l=$9}NlvYd~~#5d7ryu<{eDw3579ouw zCkqmyr_&6lQlZgqLxFT(6MKWMMiLAHKAz)+hMDzzcv9~{+Ckch^dXon0q7GvmcI4r zKf$n8D!M2K`SLvIqGJQhNpFQU;Vek<6qPdt!AOtAkt7)-KRL}%ei7rel(#& z1?a3p!rtP8rXWa_loMAR(A@jQ5@n?#6mN_MT^ab^Qj8(%`d0mK@XTsE`@??ylh8_S z=9&C4KURt4nDGomVjdnoC`yW|e4GlF0Yd2-uw%U1 zX|Mvr*BuKaz~OdDq!Qt4UnNlsOlgf#TUYpp96-_{>GAB9`*3=CxmLfYEA>-Rx1%+v zhXzIObB;YEz(4@s#*6C{nS){HehxXuxU1PU+|e{`|es2{I%5xv1`flx0xs(wO6tWhi@dKN`5m=@W%<3JEJ`%@b*A4i`_ ze174<>B>wOiIS%^w{PKrlQ39gW%>|F-uYf-g`6;@#oIVhryS2%_=+BJxY+nM@_NzX+^Gt8!WSY*mqL1h_TK5K&#RJ>cFZwbuR zYi!xT`f+SC74f(`gfQwLZn&Ldy&t6y%FTyzw(7yps!VsAbay0JODNbf4q3lUZ%xN5 z1ceE)H3?eIx{#)pV0hOJRauBpSS#GT3ocNTXtv^kZw$Xv+iMHSP+!Y|MXCD<@+k+4 z9^!Da&f0E6`^NoZWu}Hwzm4_wt1lsR0#^0@1~5G71{zlh=tCA0}x#UWz~ z0-iT3A+o39q_G&!Z)jBI1p|->jY}jZdkk?<68mO}UFXYD5Nr52j2|p3>4#brqkEM1 zy^EAM`ER6CSF)sK`gU+Tf+nW3H%dxV)L2X9@OXuv&DctXpG-2F2)0GE&loq)Aq35k zmtjMp`sGKiMyB`Fr#iMmYL=+&Lu@z7+mT~aQ}Ff*;rb@twbz?cXkI*#J^FDT*-I;rV#=c=@s*d5i}Mn{#?~9 zrzia!z}%j!MP^j{iJY*Nj44DhjWX99v$?zlU8%e0jdeZE?V%k*Z4&6k=@8x(R|g); zRJwh*KfLq?Xx`UYCpI9qz@k{w{%RFr*(Ot{`ZyH{bCp9c>ryHT_VQ8lSK3b9IZUkp z?4sjku$v=yg0`?CvCx3_@v!tCJ3nr_6ACQ=q@K)c;U{N7v|MFM=iyym(VK}(} z*jIPgOv%u6jFeJhc=kT48USGsE3B`ybL?!7Qi`TebR}sKQdySuc@m3sqcpRhAX}K! 
z;zb=U&h7`b`0!`8nf1o;nG8qXrjxmYiHF=^z2p3p3k=Z>*0!gdK~mAm5y&ern*Na^ zuID;hjZ&eD_2Z z7&M0y4!zdX2(T@0(;fdYiJkhRGAhW_(t3wOh)QmkBn0a!HX*om+9SivT(Eh?Jx@DV z)Amg9z#i8>8uok~i(4}AeA?ooA|K5)DQSic7F{TMCbyOitw^W%<_RHPW!0H5Hi9!=5DTsg)ys56)nQvdZh(yv(Y9-)q}w&-nCWplXPM)keR zgcXpD!2hm|VztLhh0b{!v^tqHg-+ZVz79?KxiPQgknc<)mc(?QiK25NvH<=Z^B{`f z>LxZg!r5F$RJW@pX8i}btl?xMweDgJ2uy%UK>D+|Ml$oo7xirN+be${R>^rzFD-_* zB9syQ`qt`=`HHzRn zF|YKHUSfj9O-aBVuT*`A-@Tn1_aXdvQj`o-*o+taWdi_wERhOhp*wyaEL zulhq!c=zZ#Fiy3+u|2x~s%WR(PGw3CH(b`?m~gvyFK$on8a38M|MZA|UMIR#b#jjR zRvj3GlvOpe`)qMdkWo$^0`iAeD*HHAA_Cr|RD8Q-k zBx}-o#IJkJU}cn-EKERK)lq`C0+4#uctvZ#Za+LGi1wgBxxaxu((Gm3;BOVP- z^YJ3Vkg~-GcmutdPbHxlDcGj9qCBdiw#N^@7raH`pdqvK1Fbc55%u?0l@fgc)ZH3$YVC!$D- zPxv`d(7D*Wj$`|!(-1JZEb+d5c9?U4$ZNEeeVuGmJtu~Al6e)XC}8ch5tD~$X2%Ok zdc;D;t!2$#pL#yXE=h(`H)`$7azSzy7IG6M2_TYPbvg|t8px>EyaYkrS1uqu=tkc& z^V`ft8jQERj{yy>Qy`U+>wrBKD-(7Lj0<-b+^4*N53^>i&JfG;viMGm_`f1kqm8cv|NB~6&T+YvkJ34DdvIN zyA9dqKzm7=U+_03;%$!heZ$cEcP+BQQ~?{vNM@vOJsp5dERuqd7S1aYQov=_SrrAF zZ8Sm6wPD;%qniTcILrbesaVhKsusNk00OLm_0TbW?N@T%rwOuMD!cO%;cX;S9v*g1 z67Mr)cfA6Si1MEBuSf!~Pg&Z-EzV*yF4n^h)##*nR?cIDgpQ!)ddsT&LhXAl6^%Vv z-#nC%Go8r*Ui0}FY)sywyAhZe)Qj3D(PZ`3&j{x7u@J%GJ|?}b5)&Q|YdvlB`mEpU z6LMGnA{v19ss^JA;(fJ@K@dsL1tBFgNh`pq zzKM~6g z^B!s0JGQhSU8>tEhOS#pHIq&c)zBiOYe90b59#TjHJdkVS%lcF zJ>zEa;Wc5VC?aCL+w%^w=tja3s3KI?uiy(Q-l+WhZ0xA#9iKIXh{0?K*bW5ppWh+_Z*Bc8VS*+Q*^%h89zR+_uKnkN=$I&UOm0(3V{Vt@ft@tdqRp>?(*3ai23=pI}R}kU8NC4qcJ9+V9 zcwtV#8dX2HM3)}nFyO&^w+Fa_q^cUW^=xHYK1wpfM>YVKOS4Wm(VbG0MUK zf_82~0bP;UQFN)ECcAFdO=~RR{$zm!WGmGt3CM!XH_;qWd>BdTLj}z0uk2AHz3Zz$ z4!FQNfW6`H$kZqwoAzD7-qelDIyJwy=!gxO7N_;2i2#Pqe2Rg`LW)#m(#Ml#G-1Ml ztG^NrH!MuX7JDYC;x`|%682HsB3wY=Cg_qR85Wo?OAUOmgi@~5Oc8&GN+xU<3f679 z5L%OLQ8uVZY9sS+EdnJo%1cvzja+_x{ONFD_5v`48&Inz8K^F&^`l~ZBuwjzzdyY4 zWgIL3pdw7_fF3ZIdZZc4`#KVzl$nr?m3i$KYufBXSH#N0O?sbq#A9&X2+T@k#t7<3 z_kjd7pbAsu;|HRO;Cj3hgwrCxi(jxYE|K%~IZ4t)3IMz~E`(RCIOS!h`PP|)TIsl% zGn5HtEjQ!WK7->z9@8QkiQ`jLLi!xCv1tI2qfeOYAp76X5zPMFDzP_1=eyp3hMdFj z#XU3*2}e$LF%mFPF>31u73QyGfwX=pe(2cnwTFu^OtIaaM_3SVs%73`=S1LEeo!>| z5xgh5jq>6(JnJDG^~R)6r5Y86(W^mNYi_u>nQ#I)Kz|;!jSY0Wyz)^B*ha{KU&H1; zbCu`bf;%h9RpXxyjW}xe=f{$3Qh8)1GQc>$@l8OTvV+=v1ufhYt&~lcGXel^W!x3h zlEa1q{kEp;P=EajkDd?Pra;n6Xxv&iQoxohA_?c%n{319aS{9*YSMKSk<3u%fRWDX zP&EZ@5$f+bfvI{Ob(b%Xywv7jN`Tenlf@9$!Y_O(tnK7(?QuC84g3)DyB?1_dVimO z1NPv3D_j*L6iFW-ll_JZ%}9m=N8HD3 zzGoM1zfdKK?_3^i8yJ9}X-46<@KWeGMVCV%Fu-{T_0z(D*J1r|`Aqv0Kh`Gp(Z~@1 zM>McVooZIOV+Sk#BC%s5gk}{ACb7P{T7W}uGF+gpJ3Sco#qc6v#FLXZWAZoT4|42% z4-xb19-)4c$>^6U+YYq^KtOeO)ZQQZ?$q@Xb=;#)U>y2!!;fh`2+;KRbtX7<#@!={ zmOXL`3(#4o3kcyH96qKp<9pw$m8@&#Gc@&S*nmww^m99C>1EUX-TV|Tgy`yEXJ6{4 zTAu8~&-RsnKyUU3Cv=V;udi~R!!EN}E z*dM4HDg7J=y{&xIpunKAac1zAYJ&OJK^#Q#F5_m7fxU@PEuWQnnjL#Pr);MyK;<2f zLP1G_6Ryv;-h~NF8UpuCPe0b#W0W}bF^d4G>C-2Qg1?a%+G`@t?Pw1xjBQKVNpS3RJA^(9CrCYE8bp`DQ)&N8&d*G!|>d^)pkvQ2iag^LJisQE?V@-CnN9# z<*-&C3zi;{GSrushmC{PgT=E9XB%>uMMtn> zB9drH6UeG6i>7?O9R)7c1>IDMw3=RqDF5^)^riC3AWA2q@y3ESI)=NrPCaBS>x2aF z!%T3W{7kOTP6Z+cBQa)n5t$QdH}3K>A2|8T;MlRGtwr$jLX~F~w!?SvwEW^aML|8{ zLF~@Ksg^7>fRB>tEof1G4hNS#qv$YkSP#D$ywrwkSgbXI zlMP^#M1^O>AiV%Q;*O2_y8N?Q z$EVVq__PZG7&@#Z6?uW@_HB~@Qe48jXrL%3qX_9@Ms-jD;jj5+;AW_f9xXwBA7pjD z#N3l|u=h~?6Ix`$N3lY$)={XV{<4SzL{S?mRkuOfv?p;FU>lMVs7;y;Z|?& za2Jahce3Ft5-=QvSFPYC)ZGj+?m9uhXSz#<@jqfGX7L+^sd4oJ^iTK!mDk#_*M>%9 z|k5~d~EXGlYYjJ&KNC;p~IXhXktVcJGh`?NhvORsP#sg+jxo{RPq{ILv zqnYZA=2!=jb*BG6Rj{=`_3-gZN9&>ukFF0eo$8G*75|Kl*WVT~<^{4iL0|XO)F;F5 z6wUt0(7P+Y#u7$2hCy=&!3qKIBZotto416`!o4YRO2Yk)9%yZe^*f}btoOB=%<16B z5z^H=I6|&WdX7{6c!J07iS-JoW)6MH-}~t)I4w!<$06%a-C#|T%1$~eW%52q&S*%J 
zW%;oTMGmvBNhpOYZ|NK>5{1YI{0zY43Ln(DQVdr&0bVEM;b+Z>r%{H#dN@%~I;^SV zN!E8tIz%9nM$E(|c(>Oh*{#z`8WylE44vG>1H){=?5XA(M@%60}wqYR%^<1qf|@kmniCbneu0A z4TLZq)eydiU9j^OJ*vRUBf9}XQOpY2_+jk(wsX6Z*vyFIVQJiLb9*Xa=nLL~O`-)u zlg7lkEZ6t~hYhkL9lF+-85R>jv=|x}gf_Rf+124RTH~OzP9tA^)N1SQ03p@)$%o)r zrGVII^-FcDdt1ZjrVwm{bHb3;BMP8_1I>|-%x}Th@X>EYOoV&B?;-q{1hoYhLVUr{ z^1ivncU|j=on_>ktS|?_ZA#$yccdrq6zFCV0{bb3JXX=#*jl(ZyY;H$KQtO*8-omX zXl`m3c&&=qhe1J)Jc&A~B~cSEq=}Z+8OSHCpx5l!pTTNhfUJPZ{m?CCgE9y5PfTVU ze$Y!Nz_@_6udg+bOU{7e964WwCrmo$UE41nRuHQO2{!i{33Ct zYK;_OpqCree)+(guNjm;*^5v?P)xmoP77h&?#Mx}O{{3W#4Ok1rd|kRTjwN0Z4_$g zWR@zw9uBNQH4Pkj5}`FZo3whX#chOK3HfDntHa@CUA?j@Yfc0Cq0m==Z{D8vRHsB* zx>(YuwK=6h*O-awNx?!W2AG*wS5!ROb_jr9{gmy=!x$+^F*KVkof*sLB~n_{Jv z@GM-uOq_rT1Ex`?s8Ych*|cZF{wveV9hP64C*bo$BV0mvy_N)(O2PJhxXl~at%+_x zZWg%B?7Y^h?#$6scP(>Hv=KHb3giSpND2r>YQuYjqL$PJD+xQevx#O@##@x@Ba~Q= zQtDO1y+5i`$G^Cxp177q^zoRUX_gmQp0hhVOS={%W9Gf=ac}tCq|Xa4vKEV^C@RO# z!hRCwj4SnVio~yoy7@N*Iohq&lGIsXk@LNEhnSPB!XIJ0=xG>Pc>6HR+^_C)ZTiuI zH{*2aY_GDfq9_`oMQdhdR-h?Uiy{(?7bHXc^E2z8(n3z9Vvg)V%Ko_r4WTdzcUsbt}9Z zY|snxZ$9XqVQ0%fedjv~2BhY4=*%hjq2OF~DC9Z;Eiuj2OH*A;taOV!ZqN;G7YYLj z(M5E#!n4oR3Xq&B)(JT-2YpZ}i#}!yNGFu(%|Fa2)CU9YFc;?2(oFlyrLuX*{v@qU zDB|rb^xAf4Ov!P_-051e??E%X~FF-KtH<$_=A&2NCZb0>(@{ z;ob6BxZ+uqf(ka0RKweXm&Z{Mov36dEuytgtB$c^PzoeF5^iBHO~Q%WN8UvKXVubP zl|a7AbJ`;7HB@)hejB(W9>Y_zdC_6oMX-(Jsh`&-h6pc`nLN4LJqjcUT$pe_7(CTl z>6_45cQOkv9h%u34r-0c3}h+x27!o4NXb?@mS>~9-Qw2qz%hiT=7bzLg;)n7c)X6Y|Yi7MX58Y&UKGn{tgnKGMCKB+&DitcBD8;sXl z&(JMk72I}b4EDMYad%8!2y}>7MSDS{4C&5bL4`y()LGB_(A_uY_~_wDRdX1+Q6XWy!mXGh%&m4z3U(cR<> zc^f_UniDGMX{2HI=I=;e%(ChPx10cF*bzFNtV2;8g1BidVRvoLmJAi@p|9hPpGozv zr*PE|U0u=pZ9L&Nyy+VS%z0&~N)b|PIOzLCFdye$s8G1Ypze}b@Dp6hI(rqC|Ja1a zX|U02#EEq+#3_@$r$ulp1IGINXxQEH^)lL1?L<(&J*qWd!MnZ2!v=S>O56}Ut?ZC* zWW#E-A4yxyyoWM2aXa~m|E$dI;jvrLbqu(nLoLxj1anle-CRf7a(tLG`_bk*T z_9;C+>Zg4cbrNcmyM;dihB#h)JGnuAPP@dXtZ8`HF%W1&$UF#`q|232HIoyJAVM&# zSUDtf;hHI4t3mFt)4Ee(DI+&X4o;z>1dr_szanytovTP^88sW&Yi(*vMjiyGk#m`) z9u1UXYB}J3K$nF`B?C*aiOx5X3EWi~7H(Yx6B?!u&%ha9g>1sT=L5w3B56;?C=nc2 z6vAOz4@mE#rTyW^M}&3V1;6?q;%5hk(=SG*Xiy78%-)Jhbrk^n>c`dOZe!z1ZoXq;bD)SF$*4?@Nws< zLuC>AJ-`8wVTK@d;@zb~wXuxQkY(MeMDarF?tPsg?8wM3_LkmdwE+15^pM_<|ZPKmb zB{aePVZBUA)c`PT5~N4t)bY zIL1ZqSq!LZJD=U+DD_Fcb|1#!k&N9Nz!!Ccs2Xjw*j0(&bak*#86Rq5TiLk%^U_1v zXh^h5@f3|`uY>%#R&JHNZ9>B^=KyQ^NKqgP*c+#d=%hr+2Rx;ua)_`AMyR?nD7Ia@ z72G{QESSRQJ)?sV&xv5DtQ(xq8RoZNp^$`%;O|ED4ff z9vMB&f2NbJMZyWlbW9kTELP$K&N1CqgZup1El5cnviY#DqBRcZm}@zpd+Qd#eukZ; z*aNefLSr_-VYcTYpGbXgZ1qce__B8kCN)uS5&f_-2`*fti`+_0-Od;DSYvCzif`p} zdZ{d*MUAjK{-Zz$1o(U(ceA&|#fphGu|Vr@D1Gcb2h8>eX4l^|m_!{CipEh%?`R6_ z^SnBRf6%tkN!M&cfjYXzZrh@4}SOo64qZs8)pL&)K*4JH-@bv%Xx_)CGOYx`ksV=)RW(^ zH#|hNaPB(91k&!oJM{{Q01Ksz z7}`Mu$0!HgVUbg-Ytk|LY~iR@)`725SM%NBlllS$Y}{xJsBWk-6Fvf*Ft^vfqofW~ z0T$tC3JMtJ=H{@Bq#7{Aj|X;*0~b0s2}Bd_H;;$JA|JJdXbe`Q=>j^iwz8WEH^d=+ zo(Y&yeUuAic;ZDv@uIJMw;D|8pPtYe+VpXUDJYEB`iUyXj?z;Jfe%W>wK*4Q9Fl;P zn?=M#^oIE5!hE9g<`5VBuB7ZC$t>RwrZu)n1kn@;;2HO{R&cEAw}$V>oYXP>OBBi^ zDZqM{P%6?Qa_^VyN+~Qt?jK>{zYDzS*o{ge+n5MI6v{HyvgKP9KY&ABoZF=1WlBCH zIPjShngBqRR5-utU!-?iWL<$jshnKc&$=TO!KLOT+&j=fK6c6?J3v8o1M8y#S2K#d zjj=OeADI03ABM+&<)=MIw1+Md@9OStHE|E99Z1TjxrI)}TQ(s!yM-IJoxvqGxcx#y z(M)hkx3^FeM^dyUVWKck9e_;!jb>VfF?oTFcag+s2x$~(^Gg{R#;fLFi6w}>Y8`E_Aq-KmdxCh$ek!LF-b!hw4hCf%ec zS-H_smxs{hn_AEqoBs0>QzXG+>4M0FfZIfGb_NiTGcsCe=xCjbK9}am2{aNZta_c^ zWYj4KRIjHXI|PmliPo697K;O}*o;?r1P3`Nt}NXNhGw9@@t@)nS-%k{(;6U*S^*I_a$`MSphPce@osm%00vYPDt9`AjFb5}?1yg=DVxRHcWQFx<%|-w6{4i!HI|$4h*e^xpSj_iR8&euX|VC6G6srCUk_OU{$&K#j7={)OTiDa55wc%dG)}<)QKP%c{RYROKuGC>F{|-2oTOlFNiw4 
z{09mcOo+IHi$#=V5;+;}C<-a*M-M>X2Xezd2xhnE^GP`vWW>}j)rKo1ODJae% zkb2|_ZX>+(-QhQiqxQ2e!=iy)u}e&F$JqHOE$@T~>FG(}xeuoVUJ>1aXi|`~p8qGq zpZH}kVINHt{|`>oWC0ybP|fGOB@^W#i}q0l(Wlhu8!p6 z_)RO_Cm`nSN#ZQ}O@Dp(vv2RaMK-P1m~tfA>6E(TD>lQe!rsKbyRn42KOu zT}xEmQa?hlY?}1#b*ZRz%>aMO#h;D?D12T=Y;7KfP7~wg8iN~!B8gxn`8xVwi{SmP zJ%T@UC8#rCETYQg>Mf{WZ11(vr6|@k`|cZIMF|D!`K}eTqv&e8@|(BKNt_Wv3KH?J zUa@7U&a*{??49+ykkMEVYxz@e0GJ7`RHqxY8nHfJ4gB_dgg#N`CCqMl7f$BN4O<3E z8o#Yx{P^JyhW$Djo`Zof3-zOR;hpghDPO}nD>A&^jwaNzxI@8UPa6a5+QF(aK|7t2(6FXO50QJ!d1A@OfwGkAj40d zp6?epB6cpz^4kxA>6l^F*A?EXi&0Q5r8g?;HahL&r^%O8=*SLVgF^L+J7%q0N@(D z@@H;!4w4a9Zd&?lKbubxi6sIe0qw(WT~Ph}hOCu2WMy`Jidsjbg_4V#I%v1>?IpIC~f^+j%zxuxh{f|kIPsj=JqFXEc8y;G#YN)L2 zj-(zaR0DKrhq;x-;a|%2M?lm?slm;CG^9}Ega#fdE^ri40I{eWAXXW8e@53<0JOC& zLNBHhaG<)jMQ@<5?oM(&)#o8Roy zwBMB}IGfMj)?S6$CX!d?DR;$GH7kK(ow5|h!Y=u)FV+irAmXrLSHVQ=%LLCX>C2S5ScdiV41DOBw}T32JH{EoM_hJBR^Th?X5_AKOXMp0V$iA&TZQ(&U1x@W zR0mB9&?nw2id!>jYq7kH1$aGe1UA_<^V0TkY(I3&`6+&sqC}k|ZHrQG!b*N@o99Hh zw|2YYiHU=!~f$&b{F z&8eo0K`~9`dx58KFX-g!)EKYhgLN$vpr?V3lI&GxA;dY>+&v&sKM;&IM%3w=n>2!> zytYPp8V#g0C?pW)N_bZMxyLO_&>#R zLZLpBaUOC+y-dOb#IKS2Ng+T_p#du>(#LP|yN{Q+64=3JfL-pHT)uyCsWyvdUrZRx z4sY#=j#Go&1{|K z6dwGoCC`-ZBH4W7Y|iexQu&1065i+82oCa>pCMJUwdW=z?zKWd#}00jopFz7AI`4a z2*2X)p4P_>G_(z0h7*3Q5PHsr;56qw2sfkIzaOo(N7FjS1+@?LZ!+ttgir_%b@iVN z->gqf=xySKk?DL#ZL6Q&upj{;5_M>eiu{mW6b6N>_7rY3XQL$x z{iMxX>GS!!Zi22Fncnx86M_dV*`t%2a$hfl4tv8wp(ux6{Tpvp1q8RKx}qz&N(K!4 zZ1}zJnpE`B9%pHixo7T3glKayjS#A%H54i!VKI=y%G&T#F=P^_PKa)TOu`^3miqbd zwOSosFofgNYF;O^g_hw3X!N<@&TAGLc|Z&n8w9tf0$J%ZsF!Z12+`@IAznwZhyxg} zQFwXeVx?dtlF<)wL#sL9ew><5axiNexQsy zENXvx3o?sj>Z-ms+4ES~ZFW+ELuoM_?~X~r3WoapiXF1Q^y&7&0`yj8U>qU}?IiM= zv^QF1aq1~Jb#MesQZEJK@}QhR{j3aH=6qi^?O%Tt7+$6A-sXn1cd^dK@Y*!iWbq2l z;Uj$pyZI-gFKNNGl7mjMOE>o5i!l^s!|8LhU1!&-V^+UAoS`8BW5{!GLbDA}>Vf*~lIcJRmaK;()%> zH-IT*9Qrpq3^Ej%)Ecqo?z-=bt(HXhR0fV;cnHPzn6Y9WJ6puLEHG$Wi<3H+Z7fckR$B@QM{O2hQ<6E+`sb^u0 z7A@LGSBU zfPVHPHM~QKH5(kdK6QnM?dJWo2wJ}Nmlk|N!InA^0eKdoXt3ppXw;3v8?)}`zji`j zG1eU`SS-vHcFw+%sy*YLd6I(^NJ&Rm0Jxw+>z=`9ADN==7B?IaMMb?FQwf58Kyq>k zq19@sFZzw&@R#XRpvzs0@V#r@{Qa5k@JSkl0uMeSL%GcnETb0A6$6(LH3Rb%9Vxp)Z@N zAvMZITiGR3t`w(Rbb)>~5zr%Q(tW{A_m`x5&V)u#UIq6~hRM6+CwaxQuGGeuY4*Qh z-vE?s@pG0+A*9gg-T;Kq`WJg;;*d)pv#>Yd2#Q2e>O8rAI%Xl6w``pTvXeurYU&=m zME5MB-cN`~RA<0?q{!flCnYhI$nsfGb;q4qBw}*3EQ0KrezprFCrk7WcAE*-B^~e- zpC(-kS`l%aCY%k<&_Y$cgq8X50>QBLP#`?hOX)Ypp0cJfnCk9Stzpu6Li1$4Z|ry- zhB!-7Bq9Lc<*PQDF{+~w_d8-`GarY@;Df(8C3AWoF8 zh;i>A$NTO=XIbk*yM6f1F()m24H$#u%MgPAY>>iTBL3t13p$6Z{CBkfNC zjz?40@zO_l4S0PIBmsvkmZ-=pv4_}54cj)6lXZ(``(XE`F_vKiVRhAo+H+)#CEhAz zX4W2KzfQ-sl;y zc^(8z(-;AJ)eh3p1%X9;+M2t7GhGj8Jq`S}-4T@4d5Ufli2kgX+n}+kYG(b!y)g~q zhY0DXh;hk4wWP>FA4IT4;byS0-8xG)eLYtdTaPVbo=O?}8|w8ZJ2L!us-15mUGo$w zft9>Wu@Q7wx1_nxU_QL+UoJKGG`n0$RZyC;Mqi4wcMc)sxsECLoJlYIRxQVwM_<<%6L{P>p)MZRK_2 zprzXFWRR$X+q$4@y<)n2fBc}lVZasy=twhN2~@FT2$|KOI4d0W&iYZE#Qvam4z(ZB zaoGZCqnR;jxMGo^J3|z-=(w-2HAq5(_|lOTczEHpo(U=hKh-A*AzRWj9X z&Ugk;97{NP3&&yp??bA@AL`!6aBGoiD=hYRTl>@qvl58n!WxhQT(XY{Y&k~2SqGmz zOzAU5f+5^i{(elMGhefGWqUYi7}tK1Yz}>B?cT?FB%GL9Ch@!+JF`WJJ$U}ii8d3g z?(+q42A?>&>Dx##K|pBU7Rs*ABi{nB#h~a+g%iYg^ZV&su>Xu^HyA{k5lHeJIn(AE zVdLkXIaD#9&s{%frJ!Ogt-jaytj>;WaLcU=l68ogo9OJa$pM$ZcscySFZc}hT6CY3 z{!rb}LG}^usDqY~vRB8VK#wo4k2mzmlI?K7_VL5lhwm1Zy=r`J_8`!4Pm{e61&U3g zQ0faxoy@v^fTlsn`fmh9-g}7$7AULcoc5g}%eDAw z8#OrTg)XR_#mS;WCwFmFh#_^GQ4Ea<=>^D$BJCIkjVLy7tGrPVe#{=<^{yb646Et{bmOJ#$bRS(x>~C4jU5)d{RB#^?-EAk-YgUm>JZ+DX!8>>bL@9g&4J%c zm5KijRCDyvMfhiHlHnZg*+1a^JILd;*VdOjf2q*sW z-`C9v*%xgpA0#jsjqZCp81|p}X0e^Glv~ec4q(E`>Ud>mSS9Z>9*6-YG-Xa(+c+o6 
zh8sQ3_|M0DXRC?~A44M~~HUS)80hHK^n14kls2mPkJXu|x&C*zD? z#9a|P4R9anV@&bO5TzY96QJoYY3O>#88=s>ZWj3O5Kfw`xCEL#SBSK(Sy}iD2DN~y zg5c&1a{FAq53|uL*0RKLo!zfpGwK%;$)Q+{VypF0(>V#s+iFK^HAH=_1}a)8$Vq@^ zmN*tZ%2AGl96S)m*S54P4_nemnYz}^;Z;)gtvDAKDlB?g{S5=Pxb~*mPg|yBdF?~j z-~CVVWf)RK=8)Tq^ROBAhbK>K{epmVbw)1#$?z?o_-8EYsyjWZUU^wxR0oxrW?t2P zjtCCbt{YOebGt?qk1I-@0pV<=M27R?_oivFW-z@>$OSP`5-5nz5B7CNCwIqnWk0)c-oLUqf zBatwRN^BZ`Z0Dyr&kf zh9+M(^~?Mq#sf$){5CoBZkH^lw4W!vCLK)CC?m|1GCIB(4*XyYGVT%1a%_!wkh}mN zYJma$n=#Qjo2GtLR0uxfUX$5YdGfAVcHymmT7>aDH7hU#kwB5tsrD2(xO$f0NxXkvYJc`FMB&)u6j78Hm+ zIR4F8*>AfaLhY(TDkoAj1c0!RN;+Y7bfreD6)0%LRxpkfdB}n#DbtK2)28<>npMn~ zjJGmM=@5#us36LX4dhFt0ZMwVICp|c`SVAu8l*@=!N@&-S+;rH=4;l)fY|ywCd14h_=2QNX9Wcva+nEr!bvm( zA;SGGmVHR5_JW)2bPdT5UZ`ccTagul_q)YnhbJH2Z+c z*X(Vn!RcZQ%Z?KqB8*}s1WaFyo&uws{SsSgPX?`IArZdwgJ##)ixp@tD4vrp+S5=M z-}bT0=r!WQ4%E`>E_h5~^KOr0hImfR~uW@~3C)l!TS<6u>hf1K$A2tvM zaWJPMxOI{!pJCyxi8020o8iJy%bt_gH8$bFM6xYi;bklt4BxV+1Vp?ApwbFmx-S6I zR|za%#NLlbMQm{8U;;;!fy$i$XlhiAiPJquPD z2eEr+G~J(vBckU{8%U=vjwoCJ7_Lug;efhcbA`VDy-$aq6qTsU-lAvyZqfdcT$@q4 z9^Qgd0qH$_sU|C-P1Vi$vSWVshnGcg*frB^c`esGRrlNAlLKiC(r*}J(z zIAmHK)6Jv388 zNC(AY-a%D9uUl0KtlRQ30q-Ijz_1$>j$EL+NjYg-wH|yy1Z5ns&}@Vcn1xXXtYX2T z>jeeLHhrL2c#FFRKlG{y3u60XtaxYbnbFT0f~5VCztVbjDpXX9Y<*qo&|j$AFo4%u zGbs*>BDMU@yBQAB>rm6@#`L?$km-58wDp5Tsk z(B!v>L{L3@Upz|PPZ`Y0KMIZdL((p(!Ccv80?Smi^(1#sk_oR z820Mvs{C2>AR8+Cv}a4&8P$PNg@zH9K));5<~d@JoWo5lNc|QFvb2L|+Op8Q|VMy>OW=igiE8_Bw2( z>TXfm{+Ub?b-#Wg5`@P~-xR$|7Y5N$&4g(i*TDg->DtyQ-4DbNRtv_pG%UZf&4vw| z2?YIZapuDV;MIyHWT$uUnFvo;w~9d$$*wzWagVDorEwExcs{KSRS~C>8t&lvbUk~w zL+;Xf{_}ByM?Jm)iQhsS?cZ=)Y}R$)_Z$=!KR2h?KLH>J>!Gp5|uS!7KuDD#V8oo1gv4r&h0w`GHi zNF=fGM$$8f=R;z?9$p+hx7^vkz<^sdCaPy}6mVuX>><=k^Zo`}tToFwMlIEDWdlE{ z9~N9gAH}CrcQAvohyfu6Qj6i?BUWh+3djW6D10qLL8GB`XNIr-RV^FSi95B6EyP#v z6Go2(AMX@{MaZODuWYDjqQev@1WDIS09M=lPh?jx@)OY9i~CMa)MKMdy>OZ|sEriX zp?^mNR^pSY6aQ{!u~hDyq9&6DQJrA=bE1kKlZJcnz2S!?m)?V$txce5=b?pGIiyA$ zzHVXF6@p50O%JSxXq@YQT`^=yU@8_%0I?a487gSlr>ihCp=z3ZitW7C%ShPF5LptK@*z?TiyOA%ABkD=>cpzKZZC#q24{EKq7P}9Ej?cuQgz)-Xw7DqMg z554#5gP41XM;qsxgu20WCJ!7Y2t2GwF-Umd;GF8I{ z^ejw^uXbhk#8;lD4R*+_$jK>kB!o#>U0pY@k>1DUkB0A>Q>cR^S|zW;)$Lzk64fbD zEmLHnnOfn~H@rhDV)Q1E2v>nLQR|Kgsgj6d@oegODChB)cIkW*Vv(B;yL(j)-3k5} zc9{0j3#{B2lt2FC;gA0~$c9)EW}j>nUqfr0-R(U{AjtYlY_{1V$QoKCiV&_`rUd#q zV|?27#4HB>Yr5K?ImuN-?gc9Qie@VpjXd`>N4?^9V6ANHp zwPk$P5Cpu`VVP!KOry~keuSz%%8BrV% zVG2165n$q~5D)cY)e%y6Q9nNU7$8F{PuMwO$EaO`Sdw49xWg$Uh-_;oP}DG+42$|V zwsRf=dpf-;X~XHdRtQAfyc^Yc;%h-JI)1c#h%fR%z@C8{N`$pK8 zg2!23mU_d~;VX;4&g;jSf)@@Nk<-5l(M2-8e^LWB$dc9J3NR!t^x6F}YA%311r9_ zE<&)4NQ5Dqs~}Sn(0PD4<%c(y*h|ITxYr4IEgR5@KaN>*8}v#5G;K{)uR;O!anh-x z5IswTdxM@~0V5oI2o7hClULQEDEyLP)^L101<-Dhn3Atd|1=!X?q< zhq_!!kvD3a4@BwiL)p^*>y`zq^<78QtCM=?2l5Q>Zt_xz-2#4Gc9o|{WW682oj5+k4;@?69bpJX@EYRpjd84U`k{k-om++Of(`H)qA)_I zsn!dA^PgpDCE9o&UmX9do9{GJ0SryONxe%h#bj!RWagYqLpEQBPjWCewl(9lm$P`F z+BIOg6eSUbCOR5H3284JH3XzTf~Pi*JOZ(7EEbHN4DI7M2p#BC5V0NAJjM;gVcAZ1Pw4CD&vEMrB5#6jl|%G^1Jj|1AN<-U_lE_BdYQn?^=m5gx;QhOt4T$Tl*1&8jVmXqIteE1IWqX9(9GC12i~I$ND} zjt7*1(1A?r6wkQ0Sr1_1KS$1~S?ISNg+O<9uVF{KGhKZt<58}6eL{D)Zhk6s6c1OH zl^gd$H`|N_AL-a`C&7;D-pe!V6|$Pn@1u^9BH!S)2Wk}n%J~Hcm}wp#h9$#6%;w=5 z06$>*8_Ab$d=RsphTVC1f64?kXYA!*2}WwN9Y-7v59DHMncNp_83inXgN@alx4t=+ z4c<`;3)VCi&$5$K)yFsgRr`B=04u2b?c8&?-GuhHP)@qgfT!tZcHsavI zkRz0LRzu!kEdX$+*2Mj}81Ddo>n$KW@e1mSi1c6= zbx*c6KZvO1j_fl#sKkGr=wCqu>x{0LD7faldTt*y*bn~k177{{BFc*}9~kfv!B^GE z&)#!@W;!*Mx<(&|i`A6UP2b1}wd!85qOG+@Y&$9sqfg>to6x%6DnV$%uF933fN}l) zJyPPxShwpHKHYSC#|BX0!DR|Nsjpz2&U38ft~z7~aJd;5L(B6!5Lm?*yTeg^_wmCQ 
z!-h8lx?vu2avUPo^qRl0o4W^OKJDf?3i*2n4^%7MtRMGg6!3qcg-&M|>!<>M_y?jewcB9w_|T$#y< z+fZ;9d{RkHOeWYVgQHV$V%$+ledfF#)DhzQ3~#)H*1tHeQ(Y{(f)=2JUOsiGz)oqGX^!8XNG){YY-K2JGk~zZii4%UEKy0shT_29 zzz#=^CmZlFCIr=mwNC9>SGR#ddXje2ftOonUNGRMlmPJUsh;4v5ND5$XlpF+>)+39b})FUKNcaY1^~ zqe{0a2Nct{Z}_YKX?Xl=Z{ZO6^pX2^pssr09DGQ09(1gcBK7kS71cM&!UJ!eZa#Nm zlHaKqR>~#i{l!G(C*glz@g=Ti9SlRyFTxF($Qmob5DDGblf+D#R${P+$%r1X8;gUR zIYw6IdXSOWXOR=3^@wB|K2;Hk`rme67MMs#BaLDUSL;B1?L>}UK}aH#P`3bXSd>*Q zMZJJXo2^P^ds{X;zH>`X9baBTrM@FWtb<6>eYdj_EK_9( zeRoSZ73~)Ckb2=xJ74?S@U^d5&|wFteUB{STI#DtOw~xLeoL5XLd06|k2!n zj?3bEK5YAI&*dD@#=_B1v?7*i2EX z;_A8Ee=r<*ORMe*5SNVs0;y)xCv>LocMBw4dNYpcVGPX&E0fW_jiBh9nqkmQTv5q0 z!7=H9UL*%}H>YS4_tQ>hH?>~nh)NToIayl$Bre?8^-o6t&rEfS^uYNMQ&J9;@V(hq zWAzw?=hIka#u~HvxpbSz>sFn?T31LFI$-10*&k+qFEU9B8H>L6Lu&N|3(DECQwUWK z(3+*Z-O4gReAh|+>aT`>{I9%065|A&tchp$@1=(W)hDBTe&2g;;OaS-(SXKx{cqU% zS!qoE6Ye)xA|iOU#(Z=mSmnGg4(n1!i!dyjGwc`OQSlqOwPZ)T5CiI!%_9_1KJ(o` z@bp&=T!H*;SS=^?LRqXjUV&IgeP;#14A7aZ;-V%sE3ALxwDzRaD$!=9q0KR_lGoF^ zUM|8$TzM`21UW<5V}1lRCTo)(0k)r`xS`4Vi~@i1A$`!&u2O3}sOagJLuB7M!kNBN z)F~=Yj`-&At>3LXksEP)8X9eP*g32(B5TMynVK-bFmjFK`TFql_#<%RWMK^=;4X_W z#fVWJQ~h~s9dh;|pxU(Z1Hu`2(+sAPU6L9Ob(&#`7PY9QgGGiTeei$**V_-b;AwrD zwbE?B#>pFsFB_-%0g#$1!2OG+g0jKICiX&iu+e5txvEO1BUipnZnk!U=y$5JCiT?%ojx7P8$heo#(4TWnzOyNcmAe{=ZF-z<3mcgCzrP`Rq4 zFqM9Z{fd@kg=<+#?45}J;&v2Q5S&=VIjTWF`P||Hl(ztTDZCU=M8ehXUN94inhKxK zmOik?NW{z7n?FHK8rxrlwT>5#(C0(khUxqFm;B1G31vwhI7lnPRtt<7FP=2jJVCnF z`mv$mqeUOrFdqWNwz4WRq*K6W9$4r7>TuYo$Eid)Z;3dh^It++$i1sC>c#c%y~APK z8&khnc*DN{n#_F%vKG%otzhbq8~EeUfu_oO#QHSDxT`$6Vh%lDQ_wMQNKR10) zHlhobIc@SIL)Z|FVGhq%ZPV&p(P@}{{)%vzn*SNP58qN1W*H%xVx>i_bWc5+9%Ay> z_W3*2u()rN_|KY$Aqujy&b<7KcQ<$>1Yq4h|96J(`tOBOUn8VBpibPRQdQ4~eILED zzL-AV7!0pgemUa)msGNg#-v;<2=6B(peOIvB0!%Fi-<6*{w3Q+J?E6)#NUB@+-8xu zeERFtwQ_gKs5$76fL75=WMsW6>H>4HyVN$-Q0To@E(a}E^0%3 zPP!+`7B9Q2BY*IQ71U`#ZwC2|RSN%ySH{RNie4h-rM54;TeNMuzFPt@mOSdGOmJvz z!7~L8Z1I!$%MQRavL=0`Zp4NpR~<$@p3klCu@&KF8!0+4p^e-rk7Wx2T_?{)WM9=6 z@NLJ<{nnD!Nxqko9NjGfg*od!0?(l& zKL*ItO0`{A))nmI{9{JnM`T{Z9gy8U$by(%bji{hA>Cfp;`Uw-FTHNslrAE$+~R71 zLj`w3W8|zs&=?w=3KJ&E2xZbq9PQ|2yZ8&s&aWeQlBKCwYx2!&X>=L*+{D$y6Q*XG z6{rYl0}GA2$$IJOK3nIlej}FpO-~db3`hP8pyFlJIs>i3qGXzV#*k_9dDB5ik_MBg zghlhCkIDWQ9W}9B5_~KuM#6PLiIIWFYA!h9?CTscI9jiv{w#^>5KQW_NmCN9e$Fof zYox#tPY7eON~Yk$Ixgh6n6;ZnpRS?USJ}_{FpLkMxdnv0=h`SFY~)WHkimIi)nqSxR6*cUAng9#wkiB*AsLsVaez+WZ98Zbiv5&5B zk989S_Q&2Ook77jbP{O@@pR2V1(9qCvsqUml$Q1BRP!95WLo71J#4e9wTeMl*HLr` zv+32^qnzO`1%CH;hu`g9wV*h}|B5R>>h;sbfE_}w6I6yh+he<0QG-aGSJ9aoNz4M< zv3EOk$+_xBS#MHQ-OFLdbqK=Vn@<@&37e#)fW;IsX<_Ygk25qt z;k%Sf4`B+t^m*|ZB`-GS-sxkVEgnCp-q3=Ix${`Aaa2z=doMM~8E+1&<;$h4XXg+I zI<+F2kB7+~HDlS?_3KljmY}}bDrgaNLo>Z6y`EmCX2^fVZi(#FQCVdA0ScJGV?jb{ zdepyyKh_#SI3-5*ej2Un&}J0kWV}GPtqG|`4$4VKw6|r0>ix=|HQ{HTIX=CZylE3d zisb~oOAH_=fO~g5CJJupLkk61Ydp;Qx`tO?#)dYDu0LR}*7^_m>l~p!ZdXMh7fDiHj;_d`>sN@-dw!nGax~G6lHgK?JYI< z80L4V%1sFEnmTk#Wr@^g7_D!m3A(y{%6kN+zX(0^5^D5yj=ok)WX1kf{^v(9WtuE% zk4`E66V4XXx_cMI>vO2bo3N`ji7qJW@`wx_}Yv~}3ku2!V!FMu~KJBa8}RM8A`kZ`qzm z;X^SbV%?9NV!p}X^foATzOYN*nE`x@1lh_r@$jb`g}=IuqmK^ctoX z{Pnw@OJat?WcN;7s|aoZlDOW5E~6`szs$X@xmGWLn{y@gaUh34>dh~hOPo%mO50p9 z;A+=lWC1B9^AHhSKsZfK*AZaqTr@`LcZOq>WWEXZB!@S_*rDa;YFW(jnAKaVk-)Ol zduF06IDTqT=O0kiV zHF@nSJ#i8A29y|oFw$bm@vL!%W<7?j$;YQrNrX6r55wd+z(FEf=S7XiTsSYXdlwN~ z*FlpaHnBAu9FGlCEwXD0v4A}+G9m;|DYU=zIm=R8aXEFqkXM9syD71EtyEHHJz+Hu z4*gl65tADqmEu=x*AIdn>}SfgCrDD?Dp{4acM~x>iKA~Q({y0b(TeE@uPa=kR=e-| z9Rnd%WGF--iRxy~7nOl+SYFkkAn4d&(Gmyr6YrV88+`3rm$ffdzydru1ASO@irppHMGQ8^ee1X=rfuBiH(euux$yb`+6N;$m$EY6 
zYp;M+sJi(lt6m@f>FU3s{^18J(TMW!MFZWt>scYp&D<`kgD{-Gygl4avB2vWDycz& zqwWPhzVIQ)~gzq>GOWpEq z&t2nU2_@bHbbJ-h(DVRtgfZUC7***L9INfE%(F?neUsD6nlF{`e!sTI;3b&9ND#3lx>e@zG|_^BdGS@R8xta)%c_Q@pFyDZLQ+V$N?Tg$J+?yFn4-aeN&DU< zWFK8UVlrzJn4r*^pI_R5O9&fgb2!>E2|dq(2SF2%vi|EP)vA%{#tFlaj?y@cyU>oH z_@)t}iGWE67u+`H4An_Le)!t(Q!Az$;W^l-|GYK4@G0B@Sz#(91XFYdS<3MdznO+c zbAC1GuVe^qxw`2*wj}$9`V-;2=mSBPpEH84wg(C0x3~tYEqNZGt0OcC+OOJ7d;uf9C$6!I*jyCY-wP zt%dZE#Bh2Y?0kWTY;zkrPOOk71FfPywPcc-sPjBmlq5kw|E3vh7)#!kdd!9hS6JoK zS^ywvUfNZ+n%q-!QH!u7h39pkw1O^Ylcf^4w9Dk%+JKvCW#;yG_{{+p6E zD=@zVH3inTo{7G{i6RF^DQ_H^KDf-$!%eNMh77@w6S+_psaAwo4m_K3YM+W9lkGJl*2{4rOr`4lfZZ z)XAbd7j$8U^lBL}WY$DIKN^k#u!5#o@2mQpAlXeHx=Jpmo1VHQoY#8p511UF_~|G zB=Bu|@s+wU_hS_~`>+rosEc_j7}mT?*1j}dF(a5q6u-PtSAb~ekJtbL`$bi zJH*TMrHF?Z3{;o0NAkUH8STY%`}#?RQBT4`K=>{q5weH11gXV!6SF|XoR&~U19M+@ zylpKz3Kis3IrLU!HXBiZHW%33Qe}_u{CM|u3azROC0Rf7E@*8Ly%1JWc)MMKAc(ebW;?-Pvi<;mQm%Y)am!>)RFW5kN$5xzjmVU8_7F?M9GH7nGQ{6ZLn z1h$fm8)JKZ%K^q(=-e@Yo(hp7Mm#$^LzoYf`>>m6WlS%3Y@Z;+T|R`t=gkuGqA^je zUK%b_g4<++y&@ckbf=D&!}}P>`)t@R#=?)!`g@}7gjOYFaSiL<@$Qzs_nyzQ0i-y0 z^t`%Iw}sTLoby?f7yzb56LsaF^h~eW6|8&TRB|leSxs-tg8$a=U;3|`(ETMe5dIat zVl%Rgb3V&@)g4T=IzbYBSEm04aZcdf1t#R8TZxY!=oHB7?xVI0a3eBQ;Rjz5G7%E< z&nO`bhde#ai)j#v`VdV%6jck;4>L;+wZEmgE^LJh&)|nLvq=24hWo7{JjEXMaH&9X znn^?9hfgh;r|M{ekT=C+aWCgLq1%PXzKf1b&8C;C@l)_!kpn}&rki0J%p3)8;US`7 zxd7HzYi*tQ$l2G=1k;*E^V%`We;Gy^H|Syk;s*jbnV*}|CZ37u5>bdhL>&|{y%_@J z)k^vBaf>`?SGVQ@Xv)WJ4D^#J!c`U{wX&SP@^*$vO+o>@i)DNcZTYL84|5CNRp(_W zqSgu3`p*PCrHJ@*@h9p04s3*G0c@K^I;6J_acm**SH5mx(vMH4-ZpuT05eGC+;v_R z%su|d*gKSXtpfoytABGB9ei-6gYxrVG6J^_N^U5p`rz#eYstm4JyX|eUS~MJC!!4y z_EEtzfsODVCnp{*(_A7N9@XB-tf@rM4^~SR7SPFdIlQTZ!2uNSOnMP-vpNXOnk%I) zwOVTkcrT*O@-IrtQ%016W@oX`^|&%7lgE ziwcjp9q)uhAWO9Q-_jQwtMK<%|IOoH@&EtwKlZ=Ff9wzS``R{iLIS%;P6@BINs%o6 z_P7p3jyq)ALzxF+?9&I67_>B}R$cCDNsHEZt{&fFPWR!cwrZC<4RSY2kvEb1!a4eA zm)F>KYW?M9PZ;>)4}8JgJx~*~z$c@D!W8P`RVIbMs1Ex z2KO{xfQP1K@nqPoo%8%`r-}_l5*i*2EEPd4U~&zyR5Y+8Ug+}%dC4w(e7x^2rb{mc za#s5>*r{Ht2IAdiq|Rj)>Rg=%;dQ~L>wQ#Lb)rY$@V>0391CUO1TSd=t!C6UT{4Xx zJXa^YSnU%IFht0qSkVt6y#r^|vJZG_mULZ=jmc`6Nd5O_4VKW3(g(APCDU0MIwiYb zUp*w2TpwhB5uJ7WpY~%o?|Sc$eZ_}q6=$*(VjTa1zM1u~)@<^&A3PHBE~G&YX1Sm1a^z{(@`Utw<7Vq1ms4)ii+K(KQ=}4!`6E_K zou`Fq!&!|cr7wV5W`#bcvZKA0L%loJb|^q`PlHP%GQmC$^%ki@%BAxG`NkXXuSmQGpn@>2xq}XD; z6{i62$8SZ{%;tfF*qgr-lGCMFidXQF`o1b)>^;pHXMtk&Dk)NwU$gAlkI8V>uWpMC zcKkp19V}KK)^j8#Ej;`lWJwoae50?tP1a^s2*gAC~$jS8KG>0S}$bi3lSSiG)+{a>+9Tk)KqW7C0X(6F5XCp!S`>OU0#L5=b z9zkj>W>s?)%^hJT_^ZNmcL(eJ5T?$P;T^`McYU)8h6BT_rBW$*9ol^R*SjCQVvtKh zPFm2}SFhX)SAXH{O>PDKzr}K>BOnCaB5uA$8lNnMfbA6X_~Fl}=P%>?-d+I;9mjx@ z6K_K%ha^0J#CijuVznBL+$yFMtyzi$h^mhGj`~IG6+&#W8|)g!^A~l%fYrJycr@(P zVX<|y<9fd}!lhkzLwm%P>RBwlGgHgD(<N=DqoD#BgQyG(7YFOWM0VN0wb@e*PQAF%R>0racO=DY4l%HrX1H3IK(wLN{&%P+e+W zMw7T_7k~tj$U>ns<8VlRw&&4)tB_1lvLa@Dku?{M_0U!Nzx@05!P?)Ktw(nR83iEo zoW0jxdtJY^7No09iGxP`VLhVtNxX`qx+LvoL?CdEIiS4$Y4z-~NL5eyAz;QhK8@qG z_xg(ZXf2HwCtCaZb!yZRxa5i(LRk@|cmUe)MAHwbx+>!m)WG2&EdXcxQ`)Qyra00y z1hk-EbudzTlYSY1yt^&U%JvY5Gy2C(U)Mc-<`M^VqNj*!iW!_(S73r#^8q8kg=axZ z8({pfn!W%MXT~cXXoeq-N3E5+76@`;2D_SyW} z-s_ZS5!LaNiBNL*MPd|eu!<1)!yPI0F!Z?~(=^SF8xBs&+?Rh1fBqg(c^->GtU{%J zaj>%JQskNweyiHs6#>7n;ksJPw22Id4!c5wLg|}1<_^S%{jQI{ks!v#j zz#-|MG9Wog&aJ_rHK88Kx(kteNnDXwepoaF0l597v`zV^!+lr=l}M6U(~(Be1h!;0pMXv1TbuEix&yKc%0Vd+|p zplyf(2S5~72u}gFf;|58_P~(ujXpkuh!&))b&v4H+82C1Rm11osRO1j7Z<8k`%bGI zx*>D2mIb(qS`*8xXDN2p(7NI9uFb?=`c%EY%}@Ef?2pGHkeQjgm{zr#P5gEJ70q&q zNmxI&31caogz5y-Prf}~_2KYwE)SpzcfTklAQZyEJ+V-iJx<6pPi&*GMKnd@yZdcy zIg3#Jm&9)UdW8RNNj;6c@=~x4T0esND@ydsV1JAQ+&3qB58!NHBx3$l1QiMIx$(g& 
zSTi_Vy%RXyK+`aKggLJ9=w6B7zsC#)^W$YvTFVIMnKOu0EQw~Wx4->&!?%B5-`$0k znHd!GN_B#N`R;G(1Ml9@CFRV1J1R`9{NFw}eqwZ$EkGScvcop+zNeC+y`t96W?~^x z1Fh&aZiIOCw5OR>&w^wK`re*18Jg4ey7pxVbPw&_oA-y`GL6UvFD|h#eX({Cz%5sB;ruC zNreW2@gqp6;dx#JqH~vfE}+|Yx=bu0JGn21!yDhmJ*Z;Z6g~*e;!dq&kKODYa431;Ch{Ugs4NF*Q6*JEPFUZi%K6*AI@i13rc$6c^ znrRGHZxBE9=E94kt>pQ!3S=ko%h_R@#4IRA$ugSuT&PkvdN^_~R4n|RR*wyueG*O8 zkNFHvK1&GRS_7OCI%FiVgWIatyM(&WcVzIGy=NpzQD_;Beo|n;q8cPpXzNg))dvU)s}e1vSZ&enGz<3 zQKRm#(66M~_ON5>t_V;i5JnvSt(qwjW$DJPy33ll+&{%-GQCX%cj^8MIYMNpm{EZ& z7|Ns(Iu-C_TJUn-2vm6|y9>DcwkIzHJ|Sk)>8%1o$!Kqd2D0Q_)N#GeSVuH;Q&%Xc zY>&B%Eeh<@)ZLVL#$38IdkrplP)_ z48E}c2+c9Z_%_y|AF&E(&F1stN;ZSE({@h`7HEN;#v^x2*-+GCMk@D_ggtGmrUJiP zT>aiZ8v5LrBk(w=hvF?&doOXX#$5q+HmZbpvZ}R<= z@`H&>nN6Hsj%YXPEilZfD zJ9uCmX4Vs+&pApRczqT}-$nMXHU6spLVu79yQ605H{cb~3U4b`#K9^NQ%6nMl-m=y zqixj$63-~we)LiHMj?|*Ss0LqdinbHfqT~ojwDoHofE>9xOH~@BCLNEPy(7fD=;N5 z#7oq!VXsp^1p}G8XfZa9g^PTIYGzpWEsWufgOrQ5Jzp^46yZmp)^8dstql6)@IE$S zuo>ruk@NH}3|c3R#GKPw9fl7}QJ66Vt6Az!~9l~#(7=0FM!D=F9vQN&lyAyV% z7T+c&*8)+mL`nm!sXjqc>2-1hfWzW{g5??%qcD#Cf7c2?o37{ZH^G7>{i(JCid5Ji zRr98v?vNjLLV`z<)WKV`ZzT3E1%*wAlChhRbobr_3|NAI>a9lw2nm; z3(6(#%ra9tXQ|W10e2nRnVXQ^G?ywewx+B<{;W=c7%#|#dbHB@-3is&2IE?v1d zq;?EO0gIiuC|N%Ii_2rAfXGYyq?!*l%$^_-@J z$=|?2V}OnGFmzWZ4i3BGW!7?}wejtW_R28$0pGL=qv8&l#?+}o1i%^oAdHA5Z69}R zr@)&moKFR};aet6O}7ucG64swS0g{OGSF5u%Mm1Vj-oz?h%;%fT0V>vY9COwAfASC z0O|}A0NI5QS6hJENL2`$I2eKM1rI%6gjc!_r=UBE3E^{M<)kxWKXkHVMO1b9l)jiz z7k(A-;{(N0DW@sgsw#GOzeK`&zxCem!}>PB;y?fP-w$tpX(G1q-2?6JYY&()cB3@Wy%rau}vAnoXH9Sbve`b7>4N8S=;P5=*wGZnZd=$u7Ruy&l-xPenbi| z{rSL;l7ea0mzOaNEpk-MtfAV4oK$0qKF{SEEbT`%H)xq-C zk>e-zC<>S80O@5RYH*OTMf=lPg&(F&AkEhTNh53owDc~NRCdx>2|X@G(Tk_whJ4n{ z)c_?StpMZIZTr{5zc%&o#xp%YFbefuYb1~plCY&OaTe;YKpne1sES)owEjfl)z`K1 zeOhM7O{l+-YwRyTKKIdQH#{2C!%JHe#}tF2&ICv@H>y)xB5S2bdbDws{ra1F(2?cj zs3?D8nWz?gupj%Ymy@oSaPT7zFdcyB)t;JcBPLs?jsDyozV^pkLN$HbwVN@z*85~( zdqgb+A+Jw9^w%fH@->4LEnj73I>kppUmNlQ1_*V}6+pJRa+CH~f;U7Ow zUA-m3yS@k2Mi8TBwMM2fXzABP@*W>XixeWi>w7>>#da93)5=Z$;n)hgk+f}fN}shpU2iJ z9BrBJR%$r~t4O;B}0)Mv+lWTr7YB=(3fd*f8^p8my{W{aaS_|x%_G-Q?Tw=#SBUi z{g-#YsvpPseBmGheCtk#EZU3koF7nIowBXVeuwUlBH_nA2t~e>`|?_35@So2z&${m z2do(TbYLERh+u|8Ou7aq6xZrUU4Tlh0L))c12vl69N-D;2T3;{_Z^hNCq33@eyvho*NIzLTce0tEW)GfH?$*=>w?u)gca~BU>XE3k+n0TXTBTUU&U4k ztEL~Qa-*y(*QXFH9q)i3jv`y=Gn^7!H;upIF40-syvqa19j+VL6am88%YQEI9_A9! 
z=vEe&0T|SAoE9ybplkg=6L>w@5q#Ayk`P zLjg_0+-P^?;VW;F#e-IA^<;2 z!OHS;)x?zGxmUA#azRpX5e@%{jwm1#CrMyr=%^iH4m5;6>hha#O+N(L-6w(h6`WFK zU`r4n(v>~js(GI*((7A8-5uVCyRY2dC&f~G>%+<;f4Ftm&Z%~-WPwa&wC-8KQe#I= zq&Yx79WDJKD2u&zWX^QZ5^~cUC`BWTd{jkxu&_w=oOgGj^2AG9F}^P zh!4y`+>U4_1@2X%i!VnUYd*Y)Fv2Q3qX-z#rPs59w>t?BZcllSi;TxTZl~Ari&R`c z8V>w`9rbJL0i_nQ^%98+e&!3aJv`go^pA{L+*_~DipgCj1fB2_x(Y#IT)^SnpCYO{ z1J%E=B@zNG(`(5SRunINe7Lu0{ZZ^a` z^5?8e+5-;RY`@#2a9kRg0GqZK;Hr1fr+$@mXgFmTiefJnP_&p4dooj;<43u5$Opsw z9~6O5HxV7!8`k%W12z;=d1axdP-V;e`&Wu-pb>L25h2!&>GVlrZP1>?xKFhnLX03O z{25I+Cu`ViX7`8acY&$fM!>{PHS6?=jNaguJ z8m7tKS5uQW-KhP7o6r>p;AV20rITrl;|(34mM6e$>NOHJ*v%%NoN~>|2_lf9R5CT( zOhVtFl`ZM;Kj@G<_PJQHU&i^%7Dxb%mgdjV3!Xc+94!9yN8a?qfaNc+w8`SR;XW$^zIG7#zX%kjo_cSTxJf9QGlpBE5(VEPtEKUj zT&JbP9;yfQDQxd83FCAdU|9C=R-t(CoR2@eiQK|W_Ah8WdbiYj09_H_y%pHEh&59r zq^{H|VVR=EKej_DlaBK)f6Y7saB&jKd&G<@ty}Msd)a3V>-tF}7d_Eb7)AAy%*Sbf zMGfAu{F8x=2!!dfl0-3~b7<)UZ-`eUi&u(n^IW?M1k|h5j5f0i_$o$&poM%3S_K${ z?@zuajR!`dp41{OFKYMFIB*A-a6dF~ z$jrhP?5vAgM@;HHKZ8vPKAkvz*#DSU( zpCg@kcfDHIy2Q=?5Ks+DFfiE#P$=eOh#|j9wT9}5<|3F57^N3IhDEUhKqnKDC#g$|7?i<+L4N1Q!F6DZ!LqM^xCbEbHOjJ6h_(a}+@BBpB7qhf$&F|jqAc4G1JPoqF$*dfA2Y(}Pbvbe( zFD3mtlL!r8sL&zDaO_j`Zx&}rXtHdC_uDB&er3;T4&spDY-JN zt@)>Q=9(1qX=)LdHOx7T{bvVM|C#bkWRP4x{%3`nqloj{zj)jJzkxjJhY~sj)3^%p z%#&B=_&?LG%G-z=j|Oi}VW6_xQD4JACfL@3Q52Wdz}F79hmGwgzJB|dSA>rJg-bj+ zh10@F$IWe;$9vyW@9fb0=rm_VC~4`<6d$)dnqNmc<%pzCZ5SCLq2dPv!k|gFSED2l zcvzn|*XTtz%MW;M35ySg@7F*7&G2u02n0_KX{Z+0V|&yR~12oZl?$}?KobN}&x<t&(hB3iy$*gj{H(rxs;2vg$WT+}G54@e_sOv4 zjU2#V9jGTS;HKk1cAgE}k7~icHoX6v^?Z)%Mr}&EDgM>4X0ojeEA?KD`pD3hj?5MQ z3fBJT`kpYpfm9>lJe|v^BVOdiZUg@bSq9J+D$Tu)_lEa^Z(%2mC$Jm2tqMFLjnrk% zS7hguzTnHjzUQ3Qtv>B{gHga* zLgTt34Nh=ZGN+!7fH{x&$P*DNb|eWJ;s?R7;N6QT;$T*&dZ=1hAcO!+*Yh>(Vb`bl z&_pqsF@RaX=ZRF?L^6-|WEM#$HP1VEhq%&bk{6ue8?gJG*)Dx^CKu6{(B!Z0B?rGmN0G7IZvGTJAH zMXn|O;s>eA`diDZn$CCaviRpOIro5Ie>nWuub^XDMBVq0PTU&be-!uaZB}8e3=R#c z$Di%zW*^c2)$eWbwobb??4mCjA?LzijL)lZ>PB0Bxliui=#-^eQI+{gFR1@&2@!~w zO5?=1Iui_529FoNDbFkKAg$ajF3y}-PJKp&X{y?AD(8;AjZ4l95vI@0jZa(u1?Ks6 zZhXMp1U>JmXivF8dnHt;pV8ps$c_w6Hcj4Hl0d8~Z!Fd@2$41~PI?S%AE(DB%*72p zyF14bp3fxmd{8vKS;|DLK)jyGg(rJ6H7w?_$BlQSdOoJxnrKak5aO|5Iw#}yx=k(& znSP=QR5?ms#y;Lu>atZ#p1Lq-S^0W1hG77{+z|uJGP11>e?I8k;Bt|z!=n*+9^Wf>Q z<~NCj=&)%<1pJJR&Hsgx+iZp8C3RF(5J)ABuOss2;{-Xu6|{GUr*nhX!l}=?A<{xZ z=hlR+L*U7cS~v}%xtY_bv|bJr1n-97)RPrzyXRM9ge|fakD}kVJ&W^HAS!=&3+LnZ z-0KwhKnulZ?q~y`uv}Zy^$B+nZzzNtj$Xfj>PKr%Jz==aVR$xw`|i*1Z1{h9HvI=5 z1%WV4eelbkDtnH}<|XebiPS@Gr%nI@ICv2Z1%zNSzwAjFxSObV=TyBULt9fK}x3#z-DiJRAHPQx+627J`W=p5GShb(77=x7Ul0*<( z>*U&RqfA|PmuV|&=T$MJ!sDNKC2ZxcNYW>ni`Ythq@bp?LEpEim1!xSxjHAipip+BPx?Emoycv-n z?`U#yhPa2XE`v+v%p_1pi4|19kx6T2P{G6PLD2^*kItAH<- zIuzf|=p{0Q8l~&=abM#YpLKl_k_Dtzo!$@mZ_xi0Q~lxaJ-?n<2HkG`v#jl`<>b5+ z&8nM5A&Mu^*S#Ex2_%=Svp37kmaR_*avx$uNDxR3z@=a^ntpNA<{hfd+F6A1 z#l>u;x^@^w48ULbTk0E&?|<~dDjUfmG$6OY`=SCW^OZn=&@GN%%hK}$s43D+SAtl@ zS;~?|fVL$eprs7wVr13UTy<7_`=`UVzp6heWO&jY0;ZU8ghwGO)&&$9M^KrUNLnMj zZbJLF368unBvL!B$U?2ESRfjRqa}!U^ti=*2KuElp{$&|KYn% zN0Ai;Y_g&*<{hESHVFIS!qnfHbqx_ag|_(N=)Cy!r^%<+GU;Ng4JV)z@}cOA#{vP< zXtgy1zv&F#RKpAPRPD*C4TodyRUa&iBP|JBw%+r=1NCHb8c9wWo2fH6&-_YvfA zqsSm*7$A>)U9jjR+fzg-ceu#HEtpO+5S}chy0SJrEg`a}sTO*Q*qgR3M?gNOvU99H1W>ctrDu!cpixxZS^_ zu@%V|(q|!u9tr9i&lDq=qsX^{)|hu$thUqKLhp~T%BLTOng?16QeQrqA3|PKrB$61 zzyKaa)QcgJ^G;eNy-JqD^>j@`rWD;N(EF%}!FEyG$)y10t^!~)W=2skvK_scW37+q zytO&*{KH&4c2&u{K}8x^kAmGM~cQ=;qdOw?+%-mYl;)%3AX%5onpfTcH0=# z`kqNbJeRFAJ+!B&)(Nqz0?a2uM6J~X`FRIO(>mh~icq}K7IN)>1KJUmB1FJ2y6xut z`H31Ag!{WgD$K`@DUkThwdswTkN`dqX+A&w{#UNzU3mEOuT0el}e6TnMZAPBB57+@N9!KWnbI&q)t1 
z(NZzeqmKyJv~mR>7S~aM5^i=e3gG68i&7|G5Bol%ua=CN@lDXCYjK(BY{ZLE7Mp@- z(-&*Wt(TBNpId>*<4|Q|f7%E9H+=Ac`QQ+Szl%tW2B!Ma7y%wSMXG$btPLp;9wjZ946259?<2>f+$7?w&ss z1AFzGvK##)m-iybMgg27fJ|tophbR_pQtcE1BllmlqH*X8GIw8AfWR4E7AnrSUiR* ze_5|qM^Mk#QkmI*9G>#VCf3TnIQ*iaAoXl#{M(qoV@Z0gK9k3osneueSPKIufY zaXgg@sC8j-!yVIba*5!{s_}szgHTbst$HZ2)Zym=|0H01vs~aA08j*)8mQEF$tecMabr-CLU+BQhWPiNq%~N!%-d z3(@+1A|=wVeSgGrqV;BBFNlv)ZgPF+1c6S4jM%~7&*zivB%ivFmf?3rR9XJP3G z9?*QDm=S_lI9}fF`S8daZYaCfRlutwDIxX_CVMJtH-b3o4R9AZ3D8Dz*lb;>+@Lo% zA~G=aS+0~_`t2OLK$wu*zw1!INHTDRF)f|wWHfa&@}NJXPDYM3j@}#{Txl1+qHo zHwk}$j?DE}f)_~KY&!Fm+Pp9tv)E7>adbd^0gp}y*d}*kJxn0ZK8D3&EQoRFB(cyk z=}s~n4uKW9Dg_DvvJ?^F2Bf0dlO7p(T6RL@wNdyf;MaFTumWJz)|qYs1Tu#i%JUT)Pxb z@+ozVQqrvrTXhg@Z_#xXlgQL-uZMf^0>?6%G?f~=%Q34*(2XbiEj(|Js6S$r4U^)k zIfH+a{F86`$DnBhJ14KO2uvhg`yYR5nfQqNKr%0;eyTiAU2XgoQ1m&=LWZ;glHHjU z#C@keE(-+nu9gNspDBs;6AfM{>#2`QeJ_Qes((Xh#uDT`s(os&V*Q+LEow#1uB-s_GFP}EwOE#~92)mv?Wyni$_)j9eD$g$ zzXcuW7jQVc523H5^hr+09Hpku$;EyB{xNWZAZ+-JTY%|`32+@cWZ(%I5A+p!-> zSn4tmS-9hz#ZuOq;$1cA=nI}*T#J*t^@GJ}BX3P+7z?YV$m#Uae@(!3*0X?EABW^u z%*bS*ID|}sGAAR>OBY@83bI+;z--(hV!Hv5o~bJ# zt+k5skAOyadr$Bfbczw}dH3cIhu^F9M-!x3SW8ZVv`Cz?DFOCWt<*x{Cj}RwRqKh zbS{&L34L;;Eq-ga6x4=6se^i&dOwY$4$O6n>oaJDapq0o>*B8Z1WsPb2@{m1=mOUf zK4Mk=5Ejrw&vt)E)+8)muWx9@_Ne%XRQ^d`uTT0;b@42O@g$7u0*}OU>XR`1ML4*9 za&s6j6rTB2&F&>M^TBv;Z3WR6z)TyT?jHW(c~966$QB$k6MP ztj2}5nE{@kydv@@-D;-5347%wac4r+tbm+0(|^g|id191`OoX1MAWeLaW_<1WvY|s zK+zLxtuIW@&WEts$f7_SC|%Eg_hy%%{DEJ7fExDm#+(k5l&q@zylTkirzl#}@o^*u ziK|+!V~oU=Khy+~;OP}%#whuyB`akZYO1L%OHTYPwa+iXMM|!>nnw5Kn3S)OVy_nh zsCak8XS*@SgYl=#Q-TF@mFgFaOjknctdC}#&M?=Hdprqe4M3H(--)vJqa&5A>8?)B zW~~uh<=Key^YU@k3K4Lkvw(mUsYCz_wF+! zBy6|uKwX+K@Fj=HJFYU<3k?<>(`cndWvirw@9EgS&)li#58Yo&1S+g~j0qlE zi~tf-$q{=>q!o`Af#4afk*1uzkrjZ#`c~YV)_if&*;T`NbFex!Q!mo4rhrO1|0bLn zoQ4au>~iYnnL3FUC$8x(26bAr&+MXc75cjO)|&Wul9Yfc0B&yyt^I zK?0G>z*O|_W&rsK8^E7tKS7PhWPhH;OLCago*^J^kr5lrTBUGI%KZ}>ZcOf8bWMjU z*D8Z~$*()@Ld7MA5l4@C8G-K`*)$1Jqj(8ue+U|gAPky8T^z$voErkVSNp(zS3Ufy zBJdWj?`GV_{;+Q|ZlfT3mY(c|^XpqH=s*#jUKMq9ROK5{4DM@h`lR9=>>Z!;9Go!W z?l8?aCrF;Cqzh4eLE%m+OU)0x7vVS(b``;|fdlU9$p+(TkqxreaUC>n%8A2`(~)(wV6kCUx6w0bmj`pL8hG%xrW{3 zC+WBpBL!Zh)M^)DKbRER%TzRj);uS=Sc7J^Xgj=iQq>7`|8Qg)<1O^w_H| zS-9b&B||E!sErbS7*kun5!?q^u*8*gO5mHd;g3E}a1j1yBo_2A2|GXSF{Lk*LBFcs z&?TBggR<8}av72s{oUGAT?Y|klcphpr$@fiTs2thNX2j_!oFwg;FgA6y{~pRz}SjQ z_Y_uNIbKu7r4SqMolN{D2{mY8cP!-sz^wIE>I+TOaT&oQO|6;=z zJ+8I$Xjre0Hey%`{`3dKPk&I~1NT2&uF+T_(*-m7%h%lWNL$jJP4cC|7;0f_n=X}X zbAZF8SAqh~pje@GjGT1Enp31;p31+DxC8q^`0=t*iw&h7rvEPppJyS_0hCVjAKUqx z+NrWPY~-XirbVIm{AVF@5|O8hoMIUA<6~uI9V0;#bq{B4#VtY%|3CG^p2C^+Cd3f} zAbhYk&xkI=X#qZ0uW`7(p1|>D9n9tl&23SZMF|p?w5hiUxmFlhQKx!oEA-8dHx%Y3 z5`|zb;hO}vp>D6QSyMfM4eL0}k(fb*>NTD;0v=TBAf6oVmU>H++8v4oz1(H-n6e;E z7WAl=%3l4k;sYoBI+vYm>MNy#*WGU!u|r|{Ywb$TU`iB!ZY=edQ;e|w0y65l4w)v4 zV@}Y&SW{y7ve&w>m+4YLQ)}Vv26WdLQ=K7X7(T8?J-|{^32^gV-lLXOb+)Y!NC&NA z1Ce9t&XsU#4n3-bZ|FGP7BPAqy&+UYb6#6T2feE00P-W5O486*a&19@2d#kKB^}|!=8jgwST7yCZX(|EC z7HrCtBvt_PUXd!Vuc*7+tXH~%`B-$PT=15@6k+FBziCuMH`N`?8z8PGSH{~U>ZqQ( z@|3&`4BB>uGp$8)v$SH)cn;;fkr_Y)D8Phq=-2I$*I`}7N63I~2y%VCgI*I`1%j`# zeVn|Pi=u0&8K`Co9Sl2|0)`?YiOPh7zN#IHFzw0Ajc$ z>yH{cHFW? 
zP=aHw$J15d(aOPkkV~p=Qjg!Ixla9Cbv>rt3QZa77-!eG4t@{a0#%7C;TZ>oCO1+6 zPfPSRYjA6bLM%(Ut>M78jQHar)%E_ySdb8jQB~M()dI<7UZjaKHi%}fL@@%R%ZmmL zK=SFQ^<(2O$At0i6sZnbD44oZ$~M>JMn`e)u7V2*Ay8kikb#tLp9D&<^<%bn<5Ive z)HwQ3)UV7?+k>d$L=a?$e&lvQHyEaWm=8{ZNT5+NMKa;T{#aqe1w9n*Q8pO+I$jJ z!4@&4((ig9G27FDyo?1K#@<(+jyJW0#k^vxQLcaA$Mqq?@Ll6e5 z1kh(f7N257crWU)Xhn?Au5Mvc(3Z}-YgvE$25|W7qOK5W%E?RJWW0$wIrai~ zbSrTJzjb%7MNq;hWhw#|2Ez|5!a7C_?*tPiVWB5j{W>$g^vG`~9S29(|8+wh(JqMW3rD zfvBOwP|rBAdyFCz`*KPSDIWl&SG35M8K9Qh;I~BiVI|3|s|Rtdu!_)7UJ18TrnRq| zEVpT`oni&lq6Am{Mv(z!XtRp{DQkYL5kekZ57o%BK4^wP6742pYq};VU}#gKLU?nv2f@8G!mV4?`;a}{o9fEP`IpRLc-WGQ zx~og$-5{>6&nG^5o_#K7gfOXnhB2u~e#uZAG?uxtAx`32p9WpAz>*A9m#^?|QvQ+ff`WW_svj?%B&= zDry8khx=Go$$A~eiiMi>b`IgLo*ih=n z(OR}=cLhl%UtHMkSks)CD@JiDZSM8ry3JiBAz|Rx(>3?N4`9F!@ikeML|aW1kdg}m zdXbh`SEyvx^|%yC%vCzSR|!N-eg6J;{u{pY^Y^`)Rs0ICTXoARp#XK0Td2QJbQ4gC zwy$E#sDB<+bd|>?i_3lyAwYHND58@3yT7gnJstLJbfH^#meLOrxISep%(EUqXZz7v zsSWaWpKMJP+*@C-uL_e*JD>VdeT$zZ2f#6apVG71xFSAm*>v%cBnWc^l|cTQtj4Sh zBl<;P+L0kF=|lvAK`Dr*`UOovaeJ8M<%?g;&+CiI%B>UM=$99{h?w??=`j7o8vx9- z&nmLQ$_ZW?d$Py6DHP%iX{2drTC4o%p%^Zw=W`MM^LsZ5PH^Kn>W01QLl^+y4NP=AIT*AAJKjTBj+mLVOE)l=2tdHp2}r(dnb&r8 zvFk0woVniUBCX#;?!F_aLk#PXtt zW#%*Qt)9o8k1l9)P^`b#)UXdlC4Jr5!=A)s4vz=4`6#*k0<72xW>LiLj_ z81s##%l5krrL7K;DGh`3R(1)DkQ(zT5TEv6H>i0b{%xxh>Y|MNxgywkSl7&%Kv&Qb$AVH3?iP*r-UUriA(t1~WKu@+U~(!Gx$ann)X z0)zQsZ3Z^*SHJNo@~TxkSfffb(+N~5Z4cO0Ty)KA_zF7@4dq|#arcfEv0o}1HJ-Xo zozv2}){jbHq2av_9H(3Hb1*_Rk0nzzsw-BSD<=rfBG2U*?>)H*B%mb7qx{fsFA z#51Gqdw4k552+!(;G~x5(Ru{k==#)iT8=ni|8mAM=ZF2Fym%NK_ek%Egslka7zaUH zN!fJ{w^N1P@fM=~^Ws@ZpVK8TXj%;=-xJ`#9G%8G?%X_aZQbs=rf;@j9J0dc`A zz7D!<2)9ET!7_o|A{BCSzRkX^Da!>|1`C*nRyXpn<6U2($IO6pX0nf%ilxc`G$UTP z-?-AIChHcNgv!r$GBkd1;F)W;ht zG=d#t+bOO@-KDpGIehyU#f~7LyBCCC4r=Kv!VjkxtT&nN|&=_0imeJ zZF*?L9?%F~t8Z~u=d+%6q&sZ@!6_4h@m1vR^`K`NA0)EO`kO+qX$3$u*f*-^hdxjr`i3Yh6$4l3{P zv&HOa8een8Y~A%73D4P@K%`JZWa5Y_hFyPt3^lL1lSy*pILvMweIy_^pmt4A?zZQM zl>JEQ|CHz{9p1C?Ef&sF#S=a;?=Jpkp+M1tT}~OSr_<;dgq7j(uD6NdiVn0g9WB?f zd5x(f@8N11q0%*#P>8c?&q4xa(pp30=sn>Vt)Z42wmHm1E!7}LJ*A-PxQGCkpL~+3 zRDy+y?|FSIG_c22@M1vy_X1KfcAt73;A&r`s3mt5iUJ82OuMQ!IHbJmx$6R5bj=8k z8p&@P1H(?C7kOUB=E@OV^zekhSOjep>#G&DS^>`7SLJA?3~$O^s*;X-gqKqFkL$GQ zp8RLSxBpkMQVZuHqdcRy7_X{FeMj9Yo%wq-#9Ht5(M0ngU zCjw<~Ani_B+1XvZ8f-G~vEcZz7#mgSJAw%xx>@-`%ioLH;bBWf*<>Lx!#zb--9{cu zb}OMaxWO%x1kJ;j>g^bW`;7MB6#vPAYg*YWUQtf^V&-2clt_J_PnOBh!%?Y&C&N?o zBncFfy>uRs3aC%4dK|q_elqdReVXC{|Als-xM~1a-g z6cJIoRr3jdeIE^A^Sw23cHpe3irRo8VM{RK4Yid3ewd1_P0+04*q8OuirY$4em49l zdsuo>u@^+I7i%MVKVaS`NJ7m73a}qKJye}SpM82@-F zTNB5&3DzRa%I>gHhpo)l*M8;^wh-Isn~+Rq21gM^uy!zBrYf-*6-Cq{1(dtG%4t21 z@tCl&EYpFSQs%>BQ6#1xN((#sREQEUh7w}7{5k}QDk=y<`g=!sEJV>kOT|&QiN5&@ zrd1G`jMrVaJY6e{4^hWW;(gaWkO&fTh#(R+1;(mbXbajJ@Z7u{Q}y?UKdgj7c zr4*5%+fl7qBu4hDDuw6M`tVX_qN#C569Hw?&d%d0cbN zSHr}oGUo4zy8BlQbENUw68@VmD0g%VG&!YPe&ib>K?P!xMtN~V$Wu#Dc)Lh60#sB* zN4iIoPOc*#vN2AcGCf*!B^d;-0XDvqjTEtL+N_ZWU2r~@tVQ{7IGT@d_0V(7r%;7WHV-sJmMYbB=4@F-i_6a z(Uo2t@oiq5`66MsGUGPZURf&Rz9DXoLS(=+1h)ECT6!{K$^_7x*AOJR6*7#dx7XSV z%(LgvizYso2`M-PCcTe*8(4Ag^`!}DAq0_c#KP4i(n-3OKr%a@Dz zuZO7pWt0%K*vAN1>8eR=uc5P+QN$(gon3zW%i-OxObP0GlqQr8Fo*8S%f>qPZyK7M zC;O?`HlXBcVRPbv0hPR@oS)Py5A_!wBdil8!{Xa)aGq-JLO(kmgyn%bSx-Q^HGN+{ zt26V0b=MNUVFX>6mV_|qTeUA{4DNlEIDm(uX1`oTvN^SYJ9Tm5!rIVGu?5%B7y_INA2A1Z+#+`&g3d_r#K-q9`zSrzpV1n1( zKA&LMO~cq7FaFugGFA+rm?s)16qyY|KYU|Ol?ud&Bx37YwlwBs7qzOaVHb+X5@cEJ zYfWaRA9*pk8|Djo$kpF5geSqRM>KiJ_qBO_NDWhV0s|4baA=5ABvf+MEcH0IbC4zB zsmsDPzoY!FB4PQ9g*E}3=WQgW*zgy%QWDBnAu{JPXL~U4E<)F0cyXMO{vdlo^h%Iq z^UN+N2l*V{8NgF@m9&sx9wQOVrJ)1w-e>~vU)GxB`^1YebL1RZbM8*qf^v|(x!NP> 
zeP%`41~Y}gflcA2*;Sz*-~P!z{7L?A$RG4?zw1F(Ev4R0TS;lUXD3$@oW9ZWAKl>V z_lIAvi{I*=dVqK)2kR(g@R)uW%hAkU%?_YIv`Z%i+PYo4Nie;5K*)6zrr-8x*j}^b zpf3ZVKmYuNE<%6MM?V@q_BR1m9n>e+luuxXCQG$5a*zg2oOiKBzTu(IcRdhBLB*L- zkmkc7Nl){IoSEaX5YHc@jUxRj7ZJdGjObnlA`s!m7Li5bhanCg(pO;$QC+bZh;oDl zj$C;ijIxH7z#obeY28)-QTsXN%N92_9H@2e-S`N71dL)|x6QW!GLYX0IyB7x*&=jJ zgJQ=g@B;zxICv!Z;AW8(M<)8ukP4*tU&KDaRdLmpq~tdUknc34AA8W4-xSbqcso8n=$N>>WLU%k@nLfwf;yfBMEHaz04|dfN*9S%%#HPb z2xO{$RN?~v+>}@$ZRMY~!L!%OK$b4(92$NCPNOnwJszVp-HR?~8^34W5#j@9$ZWX} zOiOytTa~p7h|zuQR1D&CU!75;m(dIbtBx5pQcIK}tZ;X2V07&KMxl6BfnS9Glt8in z`UsKB^Y1G@wLd&JnGPgQ>w8a82=(vjWQgLZRnBzgEv@P>qf<&VKUwN&J8PzHh3y;5f&CKfr;4dgNBigpdeR> zsc^FnV1IA;{W|JHw2|89)iEe1VzOR8n9KbZw>b+-ydy^tVH$_Cg*ANRp|$&5yS!HL z2A?n1lHI?%S9g15Wms8>zHJwPsT?E>E>bYhKR!i+U)D1ePFb;XxH+ks@;YJ+x{ z7eq}ZQ&LvZTAF?!#lepplmz|hv0uUk4B!5Z50efCZ8TCXZ5-^-Mf_N27NnX_s0cdi z4ZKvAmW{&E_n1T|iF)1|CrM|yBHqTqu(7jUN2$4%lau4{46w+^^Llv?keV(q`E$R2 zcqILvJkJ{|v~iTTYXH2|zlnFp6+wd*{h7S5lrvS7^&zWR+W@A<33P*6{giHk>#A34 z=eluRg99Z3(j>%Wiv`r{S9w&3WjaGyTVO`41omnFwt-&LOOx;1*R{YKaE*v!>qHq~ z{vx=%av*F90;m&!VSZ@FwL4)bKagruQ0K-hj(w@8~%ll4Om$#Qu5bF zyh{h!sC7iPI-rn_kUO4rg5_na79`TEowKet{C0{1)TMhA}?wGy?6kS`)P8< zF6-k8)J}S5(y!L{xUc)B`ii`|(96OWquFaMENz@eUIZe*@$H}25A^u>@FPoVN)*W< zF^9=ZA>lp`Z)zLaQbb~0?Mu6N#732RzHWS5Ygbd{^sbKx=3>xwosY(g=F)2fE=yW| zGlVH3di zWsVha^Y+G!vgK7Nsg6ikQw=9wM#|n>!|9Q_DTZhRSJ)x9A)hgY|bsl&9TcRhWIXo*0&|_>Nuny4RKgj1z`$#=yD-QS#TW9*9iN~`>95HAwlf*YcD*bZO3QQ1+Hot}h^M0h5^VCRfN4 zwpnCETk?~0RB(v~Dogj*BQ!noV9k#p{@1#0dBdB!bUqq7T{UB~SPhn3biAMX{2cX= z)%(Ny(WORS8vye9o`PF3-%>tbq_jEOhhiNo4_*thr4gWCOeOaE=ntj!7r{eon$;H} z(T7G<2oMvsVU=1J0<(su;mE>-F`PA!Ak6D4>>sIsRMQQ%vh;N2{>Dx_W`w2#)iJ18 z2Bp+p6q*Ck_rWnjc8iekVNqbQC<>~FPd{qnEg(B5QMF0Z3gIHH;CEk%2{qoJo=9~B z>|h$t0el1ZJ|}t-_Q$?XoFH$p=6L6oDeLZTKI%le&o5JL1yZx<@b1mehM$^Cv0mC_na*$M zQ#tOYf+F~EB*LxE4jTs1+ud@3k;V#XKlbDr-vu;SmsZF@p6U<4{QL$Zr;d-XAB_cl zJ499-y>=IhOH{zNoWyBGZx^|=EwY3WN1Z5n6s(fc}7q z(lA9cOA z>|?+T^ptjDRXuKqNPSt{r67d4;SrJxi^9LS<(hL2ekiKlBi22yiyS5m*391;ozS~ZZ=NwB6uV5rubl=WI90Y z`vTgydl3;O%DzqwOAjduQP8#0D;SB&(!_WQKIY){rDN5@$)~+V><_Ldy zQ+3D7VjG=I5YV8wtx@cI!)t2)8MEb4b=)8WKzD{nJZ#br)~AeH#}&@VCiv>vVJv9x zVQxbBcDVimH--DP1>3ziiP{tl76~o89YTcsjhB4JJdCsMabY`5BBqUN-?~@W1c5}9 zo_{Cwf@-F%WCJ)n@(rjNZZ1-p1qE&{ZG^Wj&;B_ulZe5A7-1h-P&0i!sjS zT?qP^3UP1LBE?g3wa6@*+H|!-(VT`zQr8v5L*rgCgj(bB5axcfx}hOh{AMD#1SgA! 
zp#C%hI~r0D?63vh)N9B3g7~`3R-Wjkwn9?B;5q8dGhvB~?icfZ^y%;uuS~&m#A7{Y z9IzcLr_miIR*xuyYwXw&0z{5D31M6hQHtZ<0pq6gz4(sJ%kVD|lVrbE#Ugxgq9W-t&-FeMRj>aHd`Ho{U5Aeuz7ZEHrTy6c&O%24Cp z1`9JKZrXh#tke{>E%mo#kIPa8@M0PrVtb>Tz?pd~G$&M=)8K02G1yVGmMJ=tB-FsK zBD`Fjl#B$u`Fef06|-khiJ8I;I#!pG&!(%w5(9=ed$45T2{THg>o|U*1QOG6YB^$( zMfkt1g&#G7NB!U>^d|MqyCGcb@s=*tSYV;L#+|uwX(%BQ8)(UUC4w{UjwKC;A-DfX zua|5nbO(G3rCxMUbL_+VFqvX`PTupz6;6em%a>sfs~H5Yh?!O=31gWQ*xZ|b$T*pA zsfb$vwwLu*LNLOtn?HzGN@l_{<_}sXG+!`;*F!5UzlalR_+!$Z7L}CD@F*`9OJ7L= zKUp^M^qPQE7ooTlUK@AGZSL2HW9Qs9*4!d*e@498Uy^wr^Pj8l8nOpT?~ffo$7^3w za@QE7NKhJQ`TE^zq-aPdiFDY?8`#D6qxWQ1Y6A2?a?HB(NGwH9iR-~_c@Y67b)KC` zuj{C5fq0*6K1>U>eN!&1K0x{x)J4opqh;=g{v+o( z0HFcjgzt&R(01F;R#q(}^yA@iv7fNineGzW9B*o=E6~VFrZHhyB+{Y}T4-39>n`-i z{^*$n7~>+ii9Wp%noY(ER`Qee6QVAo7A1;Vl6m)`o>(lJe=YgLxg-oASJzN zMWWg@!Glors(rwAGLMk*lTG0`&JI(oikd42zk!Rs|k@9t0gO%Z_>99M$=On(sYTF70)Z=C}I4fyQ@K3VO?e$cVq%I!CxhnR)rY&-Hb9_dA67iD@_o_HI~244XQV-4cpshxWyHH|4Cv*u{mBI2@<()vtH`= zZ-+mwR{|bP8Kn#z^&*)MY}13eaK$bH#&~EhyOS&0d6cks1}V7e5IWf7RFW1^RP3g- zlickhq*Uh;@t$PWD@CR{A}*a@G^yy-`Ha=d(|o^I}Ra(~rR`pTU}b z>!fj^?X%8=z_A3uy_j1j$`llePnHq?^h1;=%;4F+>P&kR0F^I?LFtnHE_b*+sULBa zbZQ7*i+&{eMBVH&E;IH#j%YBs#vAHax+bR`0V%4!+Z}WVVT8bhJe3i2i)T~>ASdQ+ zts+XM-EMgXY8KSRb z3)kt#Bi15#+nS@#0ynX>IjouuAs&ecGs@Wi&TpbQQ~E~?=M-JCsn4N8OV)cz>Td5B zTQ%EF*Q|a)GF4}lQ+xNR|IOMY0F|xh4%#S!8AR{+T7-L#W#H*fnGTq1z3JT=`ZijA zNWCq&po)$~x^XVCncPoCoTBcMPBI@J`-ZvGRjhE{+`1kpyO*}{wx1BL!y+(cg%$3{ zqRg9d1uCxYxB_Z6myC0)7?omlJBNJk(na6F^7BPkU<+++y!`W_qZ2oUL8t|(CUAK7 z7xiPa_wKu^$(85KdRPwkiZrS>M+G(3RoD)Xu`mNyPS{E}A_~wbLlPLRVO)KHyNFdk zm8rQ>%Kk;`hnfIyApTTq<;9oje~2B{rKQ3V*$)84eiiT7ZR{LsYOaC+bQc|HBi};5 zRQRVW3jz3~0pcEEZQL>f%=Dwc?k3z{Nl0!Q{ObM4jzo?6>h85>F&1qtl!|1{Aps|? zf0>F`oC+hf-?C?Z^a?0P+Blau`Gyz!kCS5=Y(3G-VfMIkiveDQU5M`DUSt`{9B|7r zF_^6#qtx~f+?sY%R0*Q2sbNzs>Zd;)e(JM_6tM~-?MYrtfm{0Nbz27K?fTl(7p@fP z-Iez`b0k6H`2*nf81--y{vC{h|J+6`7`8*#cal5E#0aMW#v^)+u<)g7oosGEMfjx1 zp2A5op#8CniL$8VGOQ<;{oc@%1ZxF*v*5f=hg`B$e1JOzmYl=GY&nBz!RI6)ve z9L3)JsKsPW$OAVv$HzHOv-O1W-?odpB*VY$e;JdB+x!pOTk^tM`HyOk<93nIYZ ztY@L+z918PR&8xHe0X2B3sy@OV3ttA`qujPftP&Q%DwL$u%;CjjxM|> zOWL119f!D-Ir=CE%{Am1k;;U|OhAj=qi_r`CA7vRQqx&0=H)pX%`jv%C=WZ)9)q)eRduxlFR8a z4QyK18uLi2Bs|9zsqe`a5#qpS2#Sq%W_SL@K@&UOO;dvYqH96nhN%*+$AR@yVk~6M zI&;HtG;8q!6-Rbhm1u_Q-VXtq+ewEBpe&392dZ3ZKki z4^FTShgOWA_kaJ$EjtnT_|3k92oawanFdT*Gk~HPo_obBZzlb(qnVOrB#&2ypU1Hv zR@9F()YsM#_81A}{tS7RcPA$y#sNLi@x3!B04gX~7yDx9e5JtDNxJegq0f)*(Pdu3 z7|BL#`WCm)CM-!0=^nbFUif3t%<_-7{&@X|~Q`m}gI_tb5%{7lPvH~c!?XXfyHnWQqR5zbY{V*T!f~C+(+sV2GMG_a*p`e zEfIAcf{%`LJX*llOqY-*S+%J%rDJ*k00)(W_kytm(#3~RFmne<@_LYX%+`}4 z<8@aLnO0xxq1981;+b-k7cz5_Y(_muSb)8iTEp94e*5Rcw}0(FJ{?wf{1byP_Dm^= z(A0&0Q*9vY{kWYi@my;|QN8rvXsdwWtiOT%h`yN^aYP>?q07%|?+Eq6j;oW&xQlWw z7!tLk)(P9VA@+YUGda)}QyeoVQ#UDW?b`Vk^TMB1VLA478u#+=;riObuMO{i;P=u2 zc@pWU-@{JsrARCT*&Mt|SH#;u{?V@E0?rce^vGiC3G3y|#k*p^5^np$m*zXQ!viEy zerf=XIn)(@&92$p29ed-%S=5-z(zvN+ZN#^rWQNP*-wVoHmM1xO+{ zAFm4zbs#47)QfNUbL0+i3HH2Xb?f-weXAiYW?21^q zAc%XZ;)_2O8$TH~{AVJh6mdx8o9%b$(}xZ!)>MvY682|n)8~jCm#Z1q@p&fncF#)y zlU@=p3sKU?7w{go1{LLMWCdV}QkgQG-%Y245RXYl6(n3mc$E*&x+FoF$FQqo_Ni8i z-aFz^NKpRYTUETc*yH2RhEGjkB^+Alxw&Y*3af--aqrKlFZH6gsN#!8ehx31$}8MJ zSjG?9RkUk_J(xZ!3gd2O4ofCJtL%%$?f@KEyaI&?W_iZ-h|BcK=yacfh67=t!db#% z#`MSR?cM^p#bL2TG?EgkF!dr^2i_ZgWd>~u1niDKL^PP5JrK$tI(+D8VxlyVP8!CV zkMLh@w4p6gHuUC&Dg0>0m@ATg7B^36%BwE*RSNdM)>{aJNSsXjr&Win_OtbYcW-_V zArW>x=yBp0O1PR)p6w57rV1($lnCe0`Cjs5!&_~tBp$Frfp3be1DQA_|4@j(h?dXH zY`Ewy3CBTz&#mPU1k-t!XrE9{>zB?orr_F;K;?YG5ooRuCP8<~t!+k(5=0pw5?yD~b~9C+-y7R4b=B 
[GIT binary patch payload omitted: two base85-encoded data blocks of machine-generated binary diff data, with no human-readable content.]
zADI+KR~WBlS7C;wZ=6W2lyB@IzpOF^>&?q68SzR-MkcnJ9;AJq7okm_hfs^cU0-x- zwZRJ8$SnEI?l_aU>G~kWXg1wOOab=zO+(zeq9bv|Tbt7AqdcN`+)wEHU=;DW4SA>( zx931A9GxH(1v49|9RU70u0gQzqC3kAp@{dANIp`c$}KQ~7pmNWM1I;#gY;Z(2@xS~ z;{p8*p0Z{& z%sp*=6;VC$(;JAfWGY`KC^BX#MLw-^=LKT;u*g&8#zGNKg2VJi1o_xEqzFk+FJgZP zdWB_Dd{Dwz+1x^K*F1ZmfCMDZ!jlRV$9b*NYBSEYX}Y}R7@ip7g{kQA_<{xx2<>9q z!?S>5O|HeQ5Z80};tiO{+4~0%OHM;F0(LgDfUVu7w-L^*BW^rS0~VJ;xoh{i7QrgO zB%b1fyMu3=d1O?zj|-x-jclX@E%oyV<7O%EX^6Lx1wq^L38yKVL^+*8Y1x9y>O^qd zEpWWd(o#SumZ)04gj~tRkn9B*^(7|1hW5?!K!fG3A3S)_J;)FgtvARP_A2D8l!*4u?DiTFpkc|u zX^%dt)k0_$xXE`2fJJT9YBPdc;1P5Y-Lh(9Pt>Bm%0xsjO*+5UfFjlO^kThe7|1CD z35&3%NK_=RVx?);%xa_Q&8I%RtJYl4*=aKsOP(%Dz8rBInyktIzc}_ea)>ynh!s-i z<_T?m*M-OXoJ7Suxqv1s7nqYO2wt0j#tVFwN~q{Hoe4rtaY>{@CeWMN0cCv2EGivP zRvstBeoIdjvJ@MLSx`>GHO;wPl8&G(bOV{Ev4T5au%$y(qPm$EZP7{u%FoSbDIs(Q zYbtxQdFy6R65kIF^2UPf7N=+vSUEn~WY~+eY*ndAcBsj9Hao@wtewaB2+Toa!~{~6 zc0m42K4jeZsql{a4Yjil((SH#z_d(9U{e(i3t(hL3LtbEwAI%#wpv**BK< zRk>-{v(1lQ^5Tw_O>tGsgb^dh@l!&Sy!#K`uYNyvCM2r5j$EnpbFUdoiWpRZ_{W?D zT_S@2DJaAg4PT~>=98Mr&*xW6oVW#Y_p|S_-Tfh=1B&kzInLPwcFj^*kh+4f zja$*$Orw- z?{vTOJMpK+p=SZAJn2rnrAy3srT;Cm#D%(NSd}|h{O)N}`z6_LgcSg!S$IVi<|Wrq zlL3Ls0Pm(g3eH_Ah;u>}X(q#kSa;2;st7H$?uf0R)AYp!AT2JiccjQ+(E08S=}o{R z1Q+~Z1SBU#hG8w*REHY0_w}U=8DE!G#D=O$v!Sf4+~h~P2zB3!H*@}UC-+xL>S#?@ zlky0li>&Eas{cR4$y1}S=P$O3ku#QPrbOoyYr1tzeEUlSf$GqErOzOj3guPeQ$oC) zW(z`AgJcX|*EqL|J7ww5gDI#8iF?n5RdCBX<%rpq+I5@;0-H6ws<|ssUGIVE>)rEs zAcz>fY6`hm+7LTP9G{R6!mToKMz0h}0%S9BR#mKD`peUBOZII%f?Nr(gd$X6VvU^k z0}#rkCmPpwaug@CKj_xufRSM!7T#;0XIo6J&&j}IoKg;OqH^Wqc+2z6oz-sldGVmO z^fU~I5N?{=keNUroOZHhvWdOIRnKE3>eKWM08k}@3SOk5!9(eopIZ%R0)s5T z7yf)fcj9)OzCCTQAa|2eJd+badX^WkWw=X`29?GdJyaqF1I;<&BIFF1A9e4?mgNf- zEM%V&HNTtt-|{sJ*nn1=lrS0A3!Dlh-hv9r_@DQwazS}v$pmCd^l@E+CY#CYCjP4# zY^+*WOrkUZH|6UjCa2MtsIs^N`&NvJcTNRmMPmm6#Kn~YGxw*-QY8Fg>H;iP zd>wc&F9Sio+7f|AvlQ#Q@Q8G*(?m`N3YZqO|ByX#x-C@T&Gvn`NIQrVc@F6GB-Z)E z?l)tfK>ICa77A1%&B-XKIc-FEhYs~T6J6WCrHROKhlQ) zGOFssk9;nbnye<%+|v&MlCaZg@ZjwMG+xdHdc<{RzGXF_Pzn++How4AGb(yNq{Rzb zC5xar63J;|22__1Z`iHvWbmBwOxo+9#Ld&!uTXLqug*D!xGx&bVJdM=4~n$Y_>>mr z@+($Zamn+A8I$ z&+wzxNaQ9wSt^L{^D|tHCj=h=Hi$W(91_9cN1V|MQjU&)gPBTX-5sY-F)o&eRD4WV zWFPQFD>j_z3&sScHORrx%53l^KnZRZA{TUJ1VP#71hwE>XL2 zDxYKMnrO7DE$;R+JVkFuMS1#Wyj+Rx7-C+3pL5V!CB_rg8rIDrG`<6qNLW|V0c;l` zp9>lzdi>IL_3;9pisgPtd&Br|WMAqix!*{*>q~Pa&O8|pJ;7tma-LyH|A;tsGhSf` zr$r|ps9qgcA;>>%aCc4o*aP+W(#5|@HbTJHfa*G4Wy&!U&T5l#s(pwx$_R=#C{cy9 z>7o$M5FGPjVA03{Q5j-ygUD2RN+jqYrIFM{h^b446tPbUa$kxukzYZ`&8_YWe+Ry% zOoQ*lA!5`Z=KKL!;Ru2natFR5oZL3=vuojBobY(fdbjt~lz%2T%nwxnozB4)r(-P1 z*e0UYnZqM)eJP#mJKjKbQla8aSjz>~ZEYyj?;**oaUaN=B1c(GrhCw)rx}4EiMVxb zP*-{vcW(%4zza=0`BQsDV{uQsMInbJk=f&dJn@nU$aWk)2nF_o!GY~=ch!Fz%S2C5 z99)HqRm6|l(N1Wk9LwQ+giu!W4BM*=F(9Lu0AZrGG}kB*bku@?)KV8WYT5j3xulV9 zE2AAZ)H}vawNW-g-W`>XgCjR%%ZJ`>7xVIt;-J?$TKNRuDfx0<08r98pt%%9e1bQS z2^?GAR0|T7I++EBEuvFfC?A3maV|kuE|4&*B0}MAa0E=FcnRq;tKDNibFM&t5vUv+ z+sZXD>;}49^J6_@)Z+c7ghPfs43YtCaJvef_knQfW>n${vX%O#NM6H=%p3ZunUtU^ zWRaZBeV}Az#V%z{K$%j=^NhSA;v})hJ}tmcBWwluv~**B0zcFq7c2qu#Wow>Bm1s7 zM69oGI1tzJ$3O0V?02j+Yho!=i-6$T{#wvR5iB|i_#3zqk}dH;W<3JS5b>SD@_oOB1dEpFtAbZBUQ;e5bA(y_G zfcxrIf(w=rS`-8pqe=SrcZ{d|qqo0&`{$-|3`9=b`tHrs?z#W2p32mdSlWK}S@%(F zgEQl{Y?}R5(aM>zuGkt1ZE`pdo{-n*T>+S(a)U@pJJh?1N*>evQ5@id&`3k< zJRk4PXFh<3|6|`r)enP5;0+Ou{FizGdE}jNWvtcJIsq>JHQSiAt9P@Y;mgN zF`A-MP~$PYjEspjDEV4mt&dxwULFQU=VZ*$X7{tTc%U%Be;eP23EC&)=X=~YH9Tg? 
z8ojfI&=B z-a+J*9#upK#HO1>5r!+Ite!|gSd{Syo-?mT(o?!`dpnPnw01Q4+Fv=mlBr3iYmiJJ z)+|#i1S@FPN&FQX7^XH0lqh{UNFpx;QxmHY=)Rny_Lw`4q2=AK0m*p|DPxS&Z~Mhg?I6)VDxRGwVK{9SBh|3W@T;I7>Ug zmxeX$`W$gzOZjXoen?mj_?udbVr8tNZ0{Ro#tGAi$DEq~ z8pPts1#e+pl%!!q#-Fhwo|!=8H>n*b=u(_{O9e~56M%=&ng;Dlc;_aQfS5=$H(#w| zN20c_)4)-^Y3hd7%*QX5hpwK{H6xfXH`>H`79@~7iv3az@kuIc4Z*T|Gyp-OKXn|~ zeY!Bu300=xQhx=@!v{rSk_}NjIC-f{y_GSAwE7-G@O~L?+$V>}8t>lxv@65blmU^U zeYpxVRrNF+B8ZKN1?%~y67IS@@f+~V`Z-m;O-hMH?#C|{$`F#_W>Lsrl}L?iQ5}-hKO~+2jza0ZvAa%V7>;Q_-$n51K5Gy|M zSc6ljXb5W3H34c8h=|LAh5H~rEEu6XI4u#t7sv@GG)Ww8u5YAmL^htVk=S8BQth97 z11Me;s01qIjgh)ccLW0eHFqB1Q4V3nULvqFp=fuktO@;;ACuriO_3ayIiZ*OUv=^F%{2FBca~DC?!5V%M3yl z?WS%&#tF9|6m6O5sHpVIe-CKZ4TK?H>w3Xo3MfRcZP(xt)&KUs~RP&g3Yct z5lXr4`yo61K`42_H>F5;$;-SvL>dk(%l0rDmj+kS9HZ6X6(nkSf>Vm5c8M6fhX>73 zM0Zx*zh=MN-42F^a7;V}Bo1yk1i9qQ<42tct%SXxVT*ooLwz3E-qii76_02GE@lqx zPo@I#`rbilz)ytwWlBOjCcjIGFYN5JOgS6tl#}|{w?dSwK|kUvi8_6>R2Q)#(%K=p zZqOKs4yFYN-z|YZ^Xs`JFi42kbS~j1Ibzi@onOA{qy3ftw$^B28W->bISlc`yFX?= zUhtm+Z=30Za(_zrUkPlbL61I48=l~__3qRUAv%{bbD(zpo;EXdvk6j@UpWz9jsHj= zZyZYC)R*pvWLGX-N$A-kuv=(w%A^uth=PPP$x7eixMJ9<@}lL|1elYRf#i-{N0OehV?dzHO2>!n{Mo=2C*- z2-W<`ZnJJH2}`BYN6q)uJjBZ#sEZ5Q#(X5398eQIg)CFKHjjAaq~OM+Gt^{D;`as=KfrO$(7mvB z-fZq#=~hSaSV}wC?QvpAn0T4RPM#m2zfxBG=@D|tLq?txFkA}`{vW&V1c@*<6^$}M z8V9;XF(xl~WN?=hn9rKfGWDcV5{WFTStKx7e10FTW%edY)_*148>HZ@`zrQJ5j5I6 z7CAN}-@W;`d-T9`YqA^F9vA!W&2M>_7U;H<4N(0hx2)ZHT{WI z1`fKYVVf^dXk&`Tq#KtF^>*7|_#=`tFc~5}cB=Uda?cUTvBXDUw{Z&1i_ZKiYJJqM zGWNPqxhTGS^1p&$Eh288l|>SSh_|V2G(IbGE`>5=tr^=KkJ~az*49K)j#|<4ph&A2 zK|;oQ`hYJl@P-$cXPR4qri>3gR^J*pFDcFWE8@BY4m|v%E9fw8&Y{vey$cp1aq7D; zM25#3r1v4zL2;RPyS3qJa0Kmz5ECh+K^GGTC$2;_x8JjR-|;)8xWd=&eg5&M-N$ii z-|oKqJ+A<-pD>p}eR?>v2f)b$W<^_4s)LDt^n2ZpelJd4^&i)TiYkwTK1*{)S8#bS z%41V7in2E>54Lzc>PszF;;~x#b>35q*jlPKp!nGjV+E$r+FJL-TccsbvQ;%2bsOc+ z^nybMn}(pp$U-sM8NDz(cyzffE8WV_>jq7{Dw)sM z2wf~`3o8iOMrxT(>LS3HLUkYTW7@T{jld-pOe`=&xT|HC5;gnw%2w@&QMm82m<4N8 zOP(-kCU%K#I8VTuK>ya77kJvySs3pi&I~h`!6ESzA!g$7{t4Yy(k;|-22;k%Q)mrj zuPH>EpqMSUx=#c;9JWIu(7GUQwu&%xh5Yl{ez(87?>CRTE!%4-Lep-OyIe#9=d0XT ziv(JVa<0tn$>2VyISCatNG9{*n7$;1jfl#Snn%~LP)R%8h9BBTt=Uf!j%W}`1M%U_ z?k6S-V@HIUQI=I(88at~8v#BWoBm?H=*EkK!hYDNMa4jitb$^C?nL%1JmC zjxzGIG)bPf0C@Es4rWZ)=oH&6q8#WDui$0n-Yxi#OP-DzEsC3L^|3r!Qq80u9br3P z#^1rMsA7$uXeq=X=D3y5_PU***1CJ=$}=gm*bpZKGza<%giH2w%RR1+y8Z7r7bL0i?5MacPgQCy6M9jBq zd8``8)!L*dh%YY=kR+!RV9aqNUwYOX!M|U0zxo4z8XLX1sQnCYXycm+Kwk!kbq^nz zP;?*{^Ji-K-J37f$;46|6uTD~aXX~5WwOQ3^io78dq5ICGm$QV-<5+G7wT+s`ZBog zHYnRm$YiR>p*tc1Fh^hc-ndbEb|5XdR6Y^VX6oPD?QP2o)0^HAMHtoCG z714-vZw;DmC>V-w37#3O7Fwyb8%J$^Cp(+#U)RccQ?!*ARVcFhJ_J2A+;cuv21)UL zR-~RQ%`p=>GW1<24oZI>Jn(Df%aMJ7lj7fvnCS#yP2?&_%%K$g0ga`RHL?tx_V}syGv0O#+FkQ64AOqfydc2?xgTLPWr@MdZ-ufdOJnfQ!Nk`13@ez`NeHMK)38n4%cVsd^+2u~pemwCcCb7KcG4kcb25mV3pI?a8n$XBD{o=XYNNx%=x-(RwlSzAQvn^?$`DfQM;3 zS8o9|@F+E|emvw>0;xpe*$uXxjZ2OSj{&2h-Zi90g%vP;(jyfU#?CbGJM|}KAclOJ z5UR?$q4>=kXRrj9=*rfMOej?scrgkPuy$qF`NDAV7bQU#$Lff%c9RY!;rs7*@7o$) zVDxHwCk~0`hWJ_MfX&r_nXBtJIFea+mkS8z5xuhz)-(tndQy?4LOBxaPnxqM&m@72ws<+h*}$N?w-j+g_7fo8qDD|th+alx{n{2(UgqW!H-;2 zRD>Tvyut9>V+AvUMwo<>y!qKN=|C5?Z6XPeA+ul7&UQCJvHylgwS)q8#)&3*{a^n*-+) z#7~NKACzo=07O+TXE=p(L%o0inlvaF;XTt51R(iSF3vJpRV9Vwhlooz;A4+2(U?`d z;``nWHGn{E)7rLA=rSt65>NjKmiONe(SfaA#$fiRU2!ux6=Z^UB~ODr%!>$M7?#jk zR_l{daxb+Vg^f3b|3W+yd>}EOrvp`hQ2m5`0VM^HQN#6d{2Aua1U$5NJPteu)Xi4t zh*b(_h;LBj2UvvjSwtc>`|QZ-e^s$J4UzP~G2#uuTbB$e2KbM&CtW*pMYg0|a6^5m z&-%1LzkHX#vSyye*BP1cN&NR5AKE-3XS>bWj-6qW0O`0z{K^a~*yU6(E~aKpE26WG zBe;T7(zHH8#a(Aa27;GUoN5S1>-1vgc&uto{BvWU@BLQyz4t;8E66zV$~T09DpNN8 
zE_|i1IyPt@v^7?xLq#43jn#>a1XTaHwb?x>Q0o}$PN8s%rIwO>0#wIQdnLQ7dVI;9 z@`+rF5N;x%Myr`+F+@B$SBB~~ymTCHpWaYN7!71i8D-M7zJrCGXp(i5YhOXQlmI_E z0v(cpI{_b;#jvB83h2&s7 zW{+Xd8~$Ob%J4v$)m0ibpDJwZud5}POdcXZ#vY2OBo_3o3^ZFWUWctXDakHGJ*X_@ z9ggZy5K%I;HWdU4b)(t|8%?ckYT+`D6D&+{l-X)sP(0$=#*}=<8-@2{?mxf%lkVMrj<;}yC{v7|S{VM;-=|uKzlvQGCLlZQ<-2Jdb!f*gdQIfV{spP6natw`@ZG|^@NL8X|mLG z9_IGY$@mtD^}51#PB~%B6t%3r$wQ*$MB(8*XSq8N_(_5D42E zhSWgPh$KP1IMUCGTdGMMMF6+KiM55+K8Bj_3O3PcOI5Xu-b4K=z*{`XYjXJzB&l5m z%V3Pz4roE$W=Y$Xk~i>*yq+L6id{@vwD@yHCmY~n_89YQ{BVB>YYQLW1P95R6}N#$ zV(A98h6)?{vZ0TqewOTWxT`dd++RS>+L~A8+X#G2sq)T&+G==>aIiM)Q&HeX8*xu; zO7d%g(FT7lGj@a*^d|22Hhuzoa3|i)+Z?Ty( zBqb?Ua9WzyJ`C>N-Rrj2eExJD!oehesCKt!Z5VjvZt{YrPfUOVAz4dr-0#-o9iUQ| z1qDC7&>&glR1gD$6|9evp!h|r;UV;Y8Nc8VXNU;*t><)#Eq!y}o5SlF&y?0^KI%0- z@*^mA(3N~EtU+Esui1f5FC^>_3xGA_`e52Xnb_Ceo1b^z_7Zom(R62Ncgm?y=(1!) zjbPa~+jwg=^>IRnGX;QfbCkYk}sy;5umxXw(_grYUAD$G^Scm_ErX;%GHa zbRpsUI$4%#M!9>}lVS}sV1x7m@x{yUQY(F<<9K~yr;$rRs|0VNH`Da0rR*``4 z&$N)UE|X7yUCv9eZeKX`d4rDqL`@VyE0(sDGZZSwOUezdLbb$TT@`tmyEnjHPbr@D zjbMpI<@Md(iV)dR7$Y~8bI&-Ppv3FfuD;%+VOGI#G_exZWga0FwWSJ~AbSh-a4P9x zn>XCqErDlVwVtDU;fsQ-k)Or5MwC%(W*`?j^`M*{tQG^TsJIjpDie1F8MG(G@*wyh zf8f%_Ap!{JID|VrJS@A0=cK7`G7-TuVJQzC?-6Bhrggh6eSi@z7ve2q zEpg`v2>j9AKPoQpM;>SiZZ~;7c|ki<`ZC4q?k7lM*n35cIy)vV7^fxM%NG?pLS{@! zhf$6U`pe%RV*Zj}5vQamKnD4gBH_WiOrH5DK1Q0*X<~CM6dv&-S5dpzAHm9d+;~ek zdWUzwm9={xCGQ{jI4-YW5gVdPkahbQ4^NK;C=+TmZC6m&izE)p5;uU^#Ra}r_EMeg zI_9bL+VoNf*d?TV#8EKM4^DZ#Xa;8NRSEy=-T>g)Brr5KGnPZi4QT ztiCv7dSe!BY{)^k$Z2hw@B+u?V6JKiziHS_idWNWac)ZZ@r)z5%A=;-=I1^iV!zy= zSX+})u^u(@CRP-Tjls>?&Pep|i)pEv;)337SM+8Kaq(q$X`yv9SLUlKCo$LKfo$*o znwgBWGg1~>1*%4-3oMzwo2yrN%mm=sHG2*|RND~3 zf8OO&uFy64z3~v_a8qg}C-oNLb}>CM&oIy7iirTqHp{e+%@y10VGUM%LuI+R?UcHEmW#h$a1Rw{0%c-~u=t6bVK@%)v zY}q1bfqSWD3BC|yGu{s_AeDmZx!TBe?P~>%Nn!E^R+-s?YgQOtCZ|~7Go_e@%#+I^PX`ni#ETl3^MOgZ4=Pb700SyZ zI@LaJM6l%m6vD|ky#}52i<^)>qtFU2ioBbj-~ZFae*}$M#l5+Y)8*PYhWhjFz3=({ zY!W^YDpDj3F2vUaS`61<9a9}qj9tn=$N=y|p0F$^SZzx6;3Mj3nMUM-1D+o*W<_Hj zP_M0&Y9jVad0R~V)-`s&;UDl%r~mtbi9%do5`v1s(&>yIKt-tnvwRk0*~alctWt_0 z-y{KKPYdNDT&c}TnsIOxLRa?Msa@r}H<+Uzeh?f;{zj(AXm;?rIUHC4^;EcS1g2RO z?SL_XwvtuCsFYjZpXn>rvAK2;bF%Nd_N#w?_qXve5Gy_;+%jJLYy8vc|ICF40&;U9 zOc#*^kIxflQYziWu@$i5tn^}q*ZFm@Hfj(q_2Fs$1(KHkSBh;=8C&p_dJ>-!Z7JIF zlmzcrlHU$>%1NnE(aeGY-!cO15h*mPea3D79FEQ{D2CdJ{>3&A(J^ySqLf#kmWSgK6z3mHD`xFJ`-eaIet>;HijFrbxcuFtu*?Ah%Wo3 zy_8wzXo0_)J_M-Rb^32G##P}Ste0Eb4|)3}IHxwoV7#JcC%6N|gw&;CJoHnTmja7a zxUZLrC2VSparvjQEl@pl4Z0&%yPz8=;}BBX-@ReTbrDZYsZ&?3N_0u#GfC69$KVCU z07Lz;$r`VdJ$w2x-bfP@)P1eE6XHuAzwVLOIl@;dXQtf1iY7Hor&#bYOKYjbd|Qyo zJZJ1unY6tY%fJt7ee8U5VVZ{IaT_l4G^MaMS?(IJZ6JTwFDfszoZ2`puTUDCnyx_-El{VuB$9X*NwoAJD zDk-_7@Oca_u{o7A*@7ZjkQDmhGj0dsG_lY3e#lsm@4e^0l3n#OVLU9z5;7)F%by!a z2*$mMj~ZR^?Vz;-?Tb#Qf8Up|C;AL0r%IVI3``Hj9n}M5#M)?A3wgn?(QD}%3qYL)F>s2eHwni8A%iBm%ESRIRF=sC)09tis2bzD+>WkAIj9<*hh}LW`*Nq z<$2*ymEz|{ZpG_7 z6eCVY*(3J=YiXz+6LI!}_N2s7w5NE3s)D6gdiH9e`BR@KYAk^4crC|`*o?bRC4(I=v*kkx&lEgqOs%eC`2bQUGMR1X-~d_~2R zU95U=0Fs7#6X(Hr@5f$2tH>D-&XPLBEZ+Zx*NjC;tX3V&_}!61bS=7QAN7%&>XP<)Y6?)~^9g^4ZMQY6PFjDDGs3*~nwX`Kq_ zSi&fTK2dJ0$%-2D6NSeESGKeSiL@?bj>G921Z#Z@=0aCTq-185KH?*{rQd0XzSu+y28@!6|q1Jaw5C&xI?w0 zWFC8IH9>shqy#Zqzm5A56F&R?@wtW^FiY0ZijpEg%G(H24Q&H*w4i z!o@UE58+$~9`LnEA130P)921zrvXHBZWR&Z@yokLX8|? 
zB*ju2&8&8NE30ve8Qj2%?~4{JY8Keu)(6oK z?8D?)SP#OCreg8O0sK7hY;+lo$_*y$7IoQ;!&>=49qA`meso=!zfV6%h>j%|0z&wl zlV$hfn9!M7LBM3Lq&a0x8oPurxA)uwfI}r91sO~?`kG~M5cg(6XJnHCwK=|G$`rik zZy;dH^47+3SV~_gAe6-8>nLivCt)u6G-xx@CYMV~`)(-lj&xyx7GYE+BOXsry`n3c zY$5ZnPh675>I=biWa&1MEw~j{VF-Af7bSNoV5j-B;~ync}qJr>lQ)`e0(_ zwK^udO9;6>5~D_0^%M&&m@q3DR$wRiqex$*r&mIDg%F8bDU<5p%3-k;e**kpIc(Lg zlTNjypembVi`1} z;;p4O0fEmM+)7&(qNbs31~0m7J$r9OP>1gOf`Llkw%GVHnep*Qp=ng46@R>Y_j}#% zdDW}jd&IeK22s^4XGLR5ul?uwQziusj`u!7P&o)o4~ zHJb~#2z+>tJJju45@RbcC$mx=<8~nqiiMAtO~k7Ff?EL&BO`p@n=?VATpH1xoDSk3 zc!2aS_J;K|$`~ailb<0u6{Fud%GZ| znx*KwqZZ0b?kfXJSpy=-Oe0qgBP|uJ5?Pone#rg;tQTia0ZC~qc))cM+|SQj5hUwk zQp_ZXW!ydug-ZwO8D$f&xWr=c4dl3)#oLct@jLoD4)lA{tB63mGCZLvjMdIb=kM>SHJ11{j#m-?xy}cT$q0eU41&I8*NcAX@oRzHNa->|$V#OO+0AlT%+V zx^D;u8Ol;94>@ zX0$vM19}@9RuxO5bG1%iq}F5SRH>hn=`f#Q$p}*$>tr1cwoFv|Pm7fbPN6>@F;55H zH*~QDL60{;eKn6TEw1QR3i9x_B-9{)_TPY+@n+iA)FVXX4u=_fdQUR!XMRH;R_S>U zEBJ2RJmHle{;%%t$H6m_s8&A<@MdGGI1Jif5lKGIS5XKsP4Kcp2FO33eUrb;VHc*> z3+RFW8(tT2*Tb-b-y+YA4qgIQl~csXgPV%0gL9{`iHlx-rXrYJP$m?|n+h~XuB9Fu zyfU+qJ19q|p5YDYzopU38(dr4$!ciu@MG@?A_z|6Uz}YI@Lo!IwzYvr$;BO%!8=I5 ziy$-!-STyUiAY#WL*g~$N?zw3{O}*UyFa@7b5puy0})IDwOYIg2&_lfAYs#s%ffRV zkc|TH!CXhOKI4bGcatu|I)u}(!bx#?AB-;Tn>*wd&q^c-U;({n-HnObGVq(aO#*VVTjSC^_2SWN%u6_V`4HXy}JcT=hrdRk84@f z#gV=lyaQ#VFTEH-L#Q8V2j3M;qXs4bfNI*1G)ZF(hgb7W5*+iGu(!X?aAe^4N`1!E_=(6erK;oVQ#$#n&v8eQ z$|6x0O0Jk&L`g90{4~TE9GekeLY;}~QtuF<)N}FVb6JYNOPRFJDOLh(x?tA&=W4f8 zGjQA8+ZuWnSoop$P2@wv9^6Rj!|%WU8|H1j{~Mo+8o!%%z;R%%3eCjd`NYdZbqp?R zFrmjJt;7d3fNg1*-@|02!ChDG)K3+3E?ZOF(;xob+B#P@+(7?Yx_jw+4Uw&C= zcGBXdOU^PQ>H9v>Lj)y8tyh5Y58A>%BcSlP;%9}$AyNH2-bNLwYLG6-S-COioX;sy zrXt1J7hZhLX}8ipo7#)4-n>XxNOKjESUrHV8EB8uXh8r%gh#7eA+FjSBzsP>jc$@1 zA3o_m@m;x;)gaks;++r|!wL|*yM(1x@CQa^a8aE0Gf=X(DYIdKulJYRD?gz+dxOz;cn@@ww{2{SD`M_+MH!KWyT?QoLvO&4+hFz($Yewr`LHLwh9%-Rw4 z2p_Pj>=xXc7dsr}9(QF)pW^&uT7tgJZ663yhP6@9Fkf^V*%W#xs1TXN#J(a}O81|q zd)?Z4_6`HV?Y|HzTh~8rZZKJxJsi@XJ0ZX_)FB?33lz}$)9dq6kJLohg%+ZsufFjmqiO4 zpy2>~#Jk}_&pO3_IWVUj1K>cDmfY2WEDE>Onl~Pvm*DMvnLN~r1qjhIJ|CpULOb4= zb^`9CwRQK7q<3{dcKPu*IWdE8USVr8#>x3*yI)-C=-LLx+X-jYeeb3kFw1i zBBkBf_kchNUP|IX4!bokM7=w(Q_8Med-RMz(Z7Yi*JAAff}H+yzEN-0r`0cYhqa+mq(@r(4gGrFgLJUb$NuBa1Yxsez;C z-M$HepV_cy6N|w4wSf5ekq#`}H}vIJdNUf71iZt)SRv_yNU-PtDT6K5%eLu%J>Tkn$xl9~{S|!Zu2AHb z@i9{K63F8s#A}7nfQM5T%;fu|=s)CE$9`0|QS6}=oan109I`I{8!?n#iK>e5`b?u; zlrr+)&m`1Y{@R?Z6jF2m=HvKvvDkAIA`VC1ay8$>!B{Fw9o_H+tcPOfGtEdU$>)w6 z!Hk!REfX1z`HWA$-#y)npKiFRHL2L2+>kae+`pE0kAf>f(7TO-N=X==_uhY5n;2FL%H23pSq6 z852;7&E2(Zd#)(-5WD^&Xh7zZXhLu&%0*tP_cx^Ma)d;Sm-G(0wV(!%!nbjwUHmW_ zdw4{Hq!X)+uWokhN%^NhN%ymyj3A86Av26}Rs15hiEuwHF}H>cD`(P&^=ge71%L^Q zm(P=z;_e;7@x5a7dW?F*M#MeY_O9ocxf zQ@Di>N?iu(eeR@w%QvQ(bD@!$K1I3`T))xhX=5KJrttInS3Xms=;$x;MVumS$rQ)7 z>>$cN!X~m(!lC~(&gD0Nr+PaJTyRNU9#g}vW~x#&<_nN9Tw(XGAVIw4xozL+zVl1| zEIEM|0gDX~2qMJeS7NVUQ;QmZ)+vp%6F;#j@1vDO$iT1)Xgh5&DHtX`2^IkhuYnPQ zhxRz50>pbU0yRLS7nto0*BLVzx{(c zL=mtPDavLC1^q#!Wkz$Ar2UtJv`}hf7mt6(x>J zi2ea6n^+qO-BR`b4Agb}1w*MA0&p|VXp#b+E&YWQMQ9FIZPP!z*Yq<-cEQCRw@FXR zh}oi5$#n`?B=BX1*uVQx_tCG|iWx|jo(pR_^Dno#cuELk+JqQce+>;GT%1j-al5{V zojv-rdlKhCg0i+Z_ySuOJ(xio*$&KXjR$SW&sh#ur~;^N7qOQ3+2R5o*XpQtyaiH|u`dY3s*Xl6bd@N-p8*PURC4 z3$PKY=pZKrv@q_V@*mU8!7FlzX>ReHBiAbwe$j0H`*~9|SkYPU) zSvKR)zSdQOJdKVP%KEepnt;OWifwB%t%G>HO||B)i^bG%J(tBx2bPn0%0msRNiCyZ zHBR-flT3|=lFcN>6bBBl9SPwOn($=O`oYrT*{5FfD?&g>mAVbJB*3cjxHT>a7radq zGan9-TcF0v_J>?eYNzc`680ryWn)jaON}TjT8fpRj>4mP)sJ~W%H9R+B=!KNV^TPp z;#>)d#pQ|5+`ap5_b=lh(CL6F$Tj`5f3`65Km5VnkNMw^{zv`)?tj#a_f-kU z9Y~|KsA_T9o^7vn>-%QD@5(*<)4RWpOAS&@Nudycdj$L9cC^IfJpLm4(tp|M_IF~_ 
zAE8V>is!X3^ba!!hj1g4<<~QYSjEg|-tk$|=3x)<$mjocU(N(=cuB_vQ{orh{! zh!lwhsvbUGbVLsMYIu#bFE7er;}yct7k8W48?10zbo{93Hc_1gRD1XCSGtePmTMF2Ef9g3SE83X*FPNjuPQoF3B?&*1N?0w zPViu9M$4-&i&`F&9s61btRdi^lX8wEp2{#r;o)VkTS%)@UzK6Hg)oM^_8Ph&CG#ry zm1m&);-|ZJue+4RDtU4WqJX412#vL33(|p+fQw zg>OKBVrgo9DZOvjke>()DGj<0e{}cr?)`uAGn<8kwa#>gSUXZps1HAcy9%wwrLP!b zY@`Y=>F}a$S~^{-Uvpf1P0)~w`6pzv{N~*eWuR2ZP-Po$X4s0rxhBGW3nSNd1N_i% z2wn@ZeBN|XD&=cE#w#i>kvmdFcz5>~an|a`lFn2c3IY-R(nG9bRpXIiX0w^5my1-4jBaZ|i`Iazwx~J0uV?TW zVEsbJ9n!6-260h5;}SF$`|#E<%s`p|_l>W#l18<|@h>*zjvD5#=1kq)-}xD!eUt&I zv#d6{qg8+4RPw3T4DS-eH#gwz6EjnzU!S1#nv>JV@FST1&azVd;VE$=;{CfMISUPEGS}tZTYI&B zVtmNg`3vqg^T~okTc|wQ;urX*`-OOXyIS56-vbm8KMTSpl(?z8)haWvDq@RdV4Gyj z$2XTU?-bgD+gpZ7zXDI8OmC=FC~)jsNwSayHBlLSzPQ!GPI?veoTqe=A@lJRqBKwZ z6v+p9AUJqFcyeZo=nuyk+U8A{Zu66myN5w9SrwBqMoJ|r8hoOa$7c=*Ue?}p>n9z` zxS#}Yu16Rd8Vnlup%qU#EYa((hUSDz^tN{1uxLJ8*#nnoG^+}5e(rPh9OI$vktQAJ zzXhVuJ3m)L)Y0o!ZxU3!d#CP>YV{Z-lqF#bqhwI6OZcYs=F4XHt2GBzG!IbHIPFw& zJ>8bigMw6Ssq?|Bnu$K;npTtSlTYcT99#(--sH$Xyygc%c0sg8#Wv!-tqH~TOne$& zr~Y`B9L8lzzqI+Swb5uR(^5sdw|~vjrw7|2+pRRobeDv<%0%42watG?t8C zz&Q#<9cO|J0X#S@-Tx7~*AmO)>+E7mMs>NSUr05f&_A{PMN}wdR zo*|F~hz=4ehdL-p&Q+B?7GNL2B)(r_cL*o>mhsq{wxfgW!6Xf zfkva|K)hR`GPS2`x)>FYMY;Z3(31d~BtZ>Cyu$ocZARQObz8Wtlgs!56?=qgE>rzW ztDsl-e?NNvQ}b!`?#FcB z;Ga(aG9j^8*$^VMn4pyMl5H`eo^mzysn{YF>a^JTW=Ls*^MFjw@-6%fo}IS5u?Q;g zkTtOg{dBNKM93`PS6}#ZcIijOg&F#?_s$LlaxFt#1_rDQGr zHA*Cq5HMVdt1+M_vj+h#N-|~d1Z`S0&zwN%<>QZoGIR*yao$6#+^pyE3_qo5^!N*Tnx25&nVU{) z2s61{V?tLMfqlb0j#c90E!?^wDRR6GuMDDST2iqpx0wG#v&&Fz;JHhI9dX3of zZ{^m%%;cQ16YR?;fP{c=yiEiS4Sq29$~GidN=e*}=IgXkf#)^cNVN^&pS%+&|NFlB zUdj=UOWJBrJNG60sQb0=2AdEZLRd3Y+VWYhpOE>i)ElQK0;x!nE2f^T2;DQqjs5Mt z(JO%Js1vv9x9a%u-JhC!@j7HVmuvKek~eWP1jF{`+MDy*P_#_!`t^4Dlsop(+Qb(ykmg6%)ElH-sS<=j+q<6CuMA#y716zsA^eKXjq{| z%Bs^LH7RjPw{Zc0z?lUYU(g~#D7QFJ4H>a5rNCBE)6)rN=BwHgOdl6!I+^TC?pQI^ z@s0+#9u{$}d^(#_s$8TtH`EPRH=S{@J$mpkkEp#Xkw1gn69g@ zgp+Z(sok<*77kNwhC(P(G4Z+Iq@MPZ%eYd~9}H8f&`2O@na*puth7RLJC@u=urIV5ab~ussQ=4%00k$Ve zBn?qLBn~r3BA2^P-lKsk1c38>tIoU z9q!ytSDGfasnstPa`Q$&MF?sX0_(&7+ui+F{}m5k32FZVYw_};7_0cPaau6aAH&D7 zRfS^*H$G~mgXFTBIk79DY5c5icM7P-&sbs@O0&|{@N)hdExHV$-z4U@;6pRHpl&=! 
zy3GrhNMqw(-2>`MgGxu;W>8qMMiOFR)LjQhiPm|ymqshirHC5@#b}m$T`j;i+qSlN zLj`c%y^<454I*Fug-A@$!oT=Z{k*Q{-q6 zxsTM;tSjqU>-DlHdZt{JR1SF~g-VIMxN>^``BJZq(d+Yp?g;j3ybHRKWLz%-5T<7} zL$YrE%nlaaC^3+-!OMlxC6mH*=$eTkx`%8bgvSp&s#W2j+P&eEhh#_0j)K0iv2y6m z8VIIPdkAeai3K|oyK}ZzO~HTw_<^)cz`Yw3COm|#mO`dY8u$Q}OCM7i;LZhP1IdMlL+^CD^2v>!p7@m3tYV+(QBf9f_--p4uD{#8n2_!UFa8 z6}x<3a)YsDC!Xf4Rbe|=F*W@^67A`QupR#U@~WvH~JU5t$|%xF*Ki?z2Qk_O`H~)d#Hf&x#!XN z+oa*-FH;&WkQnHZps(g_m4XP=3AIfYNNPILs%pHwA%TOFV&y;JTuay()}m!gd>pv) zRG8)X8S@FlqK=6>qv8#tPQm1$@zW+Vya;9XZ@}WG(A?_Am3k~1;Idtxe#;i>RL(Lw zzC){^SOnSvsp^8|0V_1^4`y$=z1l~a>Zp?v5Ry-HJOgZ} z4=EsNG&2bS?32&Ha;{SILnNEmwYjwnX8Hpgo|^*5=}TC%FOU<7;--A{q;OjVnIS>BH+@}@Xo zz(N_anYxtHjm~rU+BVa}Pz>%tYAD(!aJg+Twi;mqA5GzzPLVUR*d=L7MMaotyIpD9 z0IU0Qt3lPapO0?i?%vXVV99N|Rw2B^dK@Fp!jgDY_P@}#k4!IR(v%gI1kJV*RH+|i z;BWRU&hBfDiyUY@9kPj>V*F`-e%WPoLC+ayaVy4-*NW`U86%1i>;zfXy6^ka0v?)` zDS|696k8`XvrYf2u79+?8@#x;?qTw0nrm2uF4;M3Wn`tVd4qw-Bnu*9fmQQWL4ec3 zG!wzMj*Bfqzf;1kzInafZq+6PDnLp}bdME}1=EnFYEEG;3auznAep^q48nuhk=v?2+ZuU*(hQ3#=!a*LSt$b==NsX1ev zs>A)Y*oz`Px*cvL`pc=CC2qScUIi5Y|kdMniHb+ZR^s(7!H|aB`qBt-q(7aVifwkNRtieq`TBoJ2s@`NQ0=^^zcV>`vm(yE zZyRc}P8VNhw)y-Pwve#a@#Og#0(Dj(3MYu>OpA~sc{RRGs61(`H|mWX0t$RdJV$;E zLOITla6jIY5T#xsLYangg~b)5pBJHYgDAl4QFr5b4w0SnG0@W*-$i-qV1xe`?YP^; z_wE^ULE&$MsQFQcg0XW=W6d9a{Ev)@{L46^c0UThfB*m(sa7Hv?;`4tGe;4S3D{r2 z6hg-I3DuQ=9^>@5j41zj2^+#_h!vm}1hnxIK~Jh8sEFA$EI$hW;v6X^t-{Di5&oluy+D zK*BWgV$L@njMhR$2!3f2NS@!86cjO9we=7ZDVU6DtAjMz@i4jgur-N?crN`Z;fWcT zKy#XKBE_R%{KMc?fp@H;Kq)8hiWpjPS* zd2N+NiAIKqWPH@Ffz)&odK1TGtN_gC`~PI3C?8Q9Mu5Zu+ZF+3mUM)KeV;AoMcm67W-Vu58vn>yF~?0dP3- z*CYx`=A6AL44}&E4N4*nhZQEW%Ah1YLz6Lj2>Z1_Ud!@=fA$x*#X%RAvonZXl*5G5!@p^)kdcQOA;bgOvW2y9Y;@g(Ab zz`1L*MEQg&eN~~tO&+HBTL%bC#-znAI8oOK0$waQf*CS*VbOA2bmYSdj-IptUlCX7L6T=4tq zsG!Nml-_@=e@g6Ubf#x0l2%*t_O4+&Mc0BxN1UF?azxZ(a)SF73sHPVV^O^&*stoM zR%<$%B8$gG#`P{20worxk+v;c8LwIdAP5rs1lniNOukKOYEPPcx$FK$i!x?L>S!qp zppQE|E0jJ>nl1&Z4t593A(6G@Y6%X=FJf)TImIqyJIA&URg_(aU9cUcV9TIm?Pw|y z#uw`*M%Su2ZF9#VBtaQC9pUzh5zXY>auaW>Ch$S>}owC}o;Pk%$L7VuIFT zHq3NFuD2#fX${}K`)2p;I9>|IkQTS4SReVzR%>bz)DiEN&^HaGRspdWIe386Y3>6i z-7GYSUwzR{Y!-q(!PT;?z86b5V9#%sWW+Ig4?37Ey&-b+l2Ixlsi@A!Zj8rKeLO5# zov+0^6tLlFO2q>A3!Ks-bK$Llxo_gdhjoD+0uUtx;>)fuI^>f4%{U(3Hdc|J4sj{kI0QE`L=~vWe>q|?8DvQ}*9x7VS_p+h zSK~NI1xt+OF^ zCo;Ord9Afrf6Xdl;?3`?Khx|&fll9GA{6{C&EOetoso9De6DD1Pk>cHj)Vu=2NjfhLgoRs?=AkM>s^ zG9V8pF9y98^{$mDAe`%0*)c8`BV??&PgHMD6E0Jb$my%HdPPbu(#kkL@6L;#IAs=M ztNIIB^R$-Ms$OP%{likRu!6CpDJyY66c%w_Sc9>Djdp<7OVs*+kD#|qU?nQl!ZAoe z(}XlbT8Aw*9VZ4tVuI9EIbaEMucP`94cOvV-K}>J0tHJWuI(CuN!7%|e(^2@VJ>xb zVe{lgQRpvmUj)k@#+OKO6kGZ9K5%QNfDHkCYN&*?ps|k%z8f*rTFSAKSR7|5T%{}{ zi=;1Uv>l{XgEk6iYH^EZYp|olfdc!ZaO}7!tXsMO;<`Igp+8F;m_4LNGoDOjhr=m) zR1OZWXyMwzQ=dFSy2Tlz`V|!|Y^l64s*YA1KIT605rqM~JA!=~6((edt{IMw>$t34 zn34Bm(Zq6z9Wn%|)C_{Q55b{a+;Mpy*)5HAGX1s%S9FfL1JEpJ)zByQ_OVZb3 z*I0&dSp@IKg{=4bzeHTn;iQzMU*l__;hGRjMj>#hm_V|+z|!fD2`$_@L?&g>gWXx? 
zJkbx6R8+`9H*k}Mm^lz2%Os=-%o;%uxdQ#Yn81-yahtfXLF28 z(4Jt>HBJe)z!RmwiP5{xTUe75OrFoT<`8FLQVt2xCe39dwnwgx?_A<-dFkuiP=d=| z3mwhz&l~f^_N4Q;#`n^jifvs*<#A-j2We5(}@jB1zOT{N2i3f`AA zM!ZsqIx-!xyQY8ASv-WVnfM!jl&(C@Kaw5esM7;pN0x%>PsbDSk3^~vSoAz3@Inv} z+>BsJ#>GD{N28JCwRXNktnxVTkUZlO{XYPrD{hU2_6G?1^dt@ilBd=ouhs$gj3%Wf z0SfUraiTZ)EcpX@XqVMsw5@feUlgo#{O0(Qcr8Ho*q0bv8OPWc{8B9Z9-yaDt9A3+ zX%#xS3SRrxx4LhA%YSQeK%e%#83}L3>r@Ct-9fnqLo8AxSkR)yu9C{KR{gkh*ooT- zLs`^;|Kt`GEI^M5CvD2!Boi7E{mwba2OZ2a{0UCX-M1Fz#yJj__q5mbcGqKhoK$Hj zMKiv={7Z{-m?a{ZP*_(SRvKsRN-Uic3JBtGU7ba?St4t`BykXAM7m=jS@6hQ5E)} zH=$xjjGNlO<4cLFWzK+si)JKscMf-yKGUFTIT9*?y~*7XE(twC%eBGvxs~NrQDkQv z-ZW!#zt?3ulLHkO5vw)F=@u2L9AP{vNbK15uB`r&5W+tt^qpE-EeL}`G4W}NC!0&XwYp&$_V3v8u+=0s*V(DeaEsR5?#ByCRrEBOPl7bDm5!F@q zI+#n@0+1{f=^{H9X7(qJz_kG6@|Ny9Cocg^M#KbVPkW1GbH&HmXC5=+p*e`qZIzp& z%l|nIJXu+g7{Lg@7h5B~1UFn(p!@+B5O;a$W~@tb;v-qV_GuJ|gw?f+2k9Q{@A$+S zHc^Y=Rg`*6=ioSvr=FNAl;ifsdQcVXz00UASGzWOS-(dJrC~rctdi#t(aEWoO9SDt zqh1cOLWa7W^F(li*3_AnXcLU7W)r2a1cNkH_7Ahw#`oMB>AR&jsm~Ws3Ud{6Sy>PN zGUE|AnK<=8!IVHVDM7h7#2UX=dxhUo5D5Lt2@AY%+Yy?*tbXZtf;PrrKq&Yq6nz_- z(*RqrBs3V9OMhqukFMl<2(Fjbf`di=*?LPxV@apwb z5q+3nDfeaK>x;1>r}fQt^2_T+@xtDG49?ea+al!LI86|V_OJL&9q(8VG2VK z1-MN%7R!xepKyrBg{WU8vNR1V!JeL$v3Er9K!vAB0(0Ty4IuVZFA8G%rxte{7nYbR z4{O3{d~6xEdO1)AF6gpGaQNbOQp!BTc{h*Tn@><9Xe+rfDV?o) z_IiAR&uBJ7t!bF($q?l0z5^B5OLjlmx zw!sltRTT(;FC=tOUfz3jNQl<0uJFgJHjk(n05|iuZ+Unj7alrc*RYncW|OlOET_jz z{E05kzZh&y22IAu62HveTrD~m=jmx$C5Qtnj%G$6S-84!!XwZXB?a;AZ+GARcD(U9 z-!`Kls?)~BG8<)#*wD;)sCxW#cR%xLkI(|uC|BZSZ_~WILWmL77vD!P13pXmG(g@| zxs-a9D7hvJmI;h4ZCfCS8W&h1Hh6{-dKnIZbsAD8eT5;=qS2{WaRRN%RXKdq&EuGV zz57jnZuUb+8k8b%0bt#K#D%&o96;s?CFs5soJm*WlLXA#Ahb~dN-knFLev%6&PvOq zH%xMM{^4f(#jEn+LpAXl(p}0QWe|j$pM5|mIbj_rghY7C#Ux7VGwfX=C#R!=4rQb! z0Yl5NDAz`kWr|EHH5Z{u1In8PeDc}%##-A%( zgTH=L1R%;-MTN=hSX*;E4^!x=5?S$V-t@6c=;00Q@`7r&6i+SjQM^x#C{n%xboyh;dO9pZj6y5-QLK$xuhW!q*J#Ak%W)~U z-a&Qn)C{b1qo_J5fpvP)5Xd((e1)8kdw)n7^7y;e$mF?3X1Txr+z2c&1Nk1$HKNiut=|tt3Vygk z5JX_1NQ0ovH;Pa4B)1-r2H#$bKgl`H>xMz1;9rrI^rL623_`YQ6Igv*RBm6bU*T?ePNU#KB$u`fthb$f#CyA?9aT$F@$JZ~_N`j$g)55Gx5fH_S%~Ckhr?_}K z-l2-%KMuXL{u236iFZOHQ+$dHw<=SiL`$KjGG;3HhzF|Ns^pRtNggbBwI!A?EmS(C zsi5+slkyh)C=IBKbzyTbIc{cR(UkPIHTkl?x6;P2SEVr!zyNndO|co9pMb&6anmv^u{V~^e64@IicHe}h^XgDqS;~_cRCg$-|2!0LO1 zT5I^PN}g8|%dr&lakOoKt*4o?QzLiK7x^=4Rd-@)vfp>90Tr!WaIgWk*tnQVtEiIc zB>451{7TS!SD^@`_CL=+aLhKj=YjarH@a_pGpeEQxuYQ&x5<$*ieejbHWg=!pRw~*1ywRpw6tuHqoAU?F-+W*L?b;ggM$}9E`RcI z;pR5Ql8W_I9!kMm^7aqt5Y<=Rh)E6q&q0<{)B2n%SH{L8z+t&c`&D@~)x_;}Pmi|z zOolLHGh+2x`l4qQc^1sUlv=}qW$JM3m7%)i3DTr!6Xg-cam4BlTfJG_7o2(j^%>X4 zYe2ISP=MA};($+BfNxxjgknv^J=p2q6x(z$9j5dfNLe9fi4$bzctgX)&s_8|fZr0i zS}mdjLy#tMFlu~!haf7-Abm2Ke$$MaWBq*9Jv9+g%y5SgGAaIAw~ktOgwR7Cm?!8l zsVoEmVACD`6don$E?gQCHP*x;L0Oz-<5b`*pLY*US%jSiH#!p0N1ZdHfdS~rci3Iu z?7nHz4?z@2HFyou#cbRQMRp>LQNJZP&u|OgqlA8NJd>b%hq3Sjxt&#g1N%_^JA+K9 zpMruq9FvziDbLsxCEs-QPLWnIvy=1yfVB^E&kqAG#B8W3hc=(Rc5Dv-^(O zW}v(isDhh1@dnfs5!;F1hj8evrt?LNRO!aT$BaID5|<4dVF~0O?cgeUo{45H(W|zf ze#=xQZ*d8z{XJpvOQ2t+@NWt9n>unE)r~sd&jq`v@FbiKxp+n1ToGeOiIBI;tJ2K> zVkvYR8)iqs@~Xjz7ZB%X&Z^J-XmI@&LF&w#N=~+e4;NXWhu$R_PcB0ohl1QC)~O#= zjNq65aE_^ZQ7-7V$`0zSCi+5_As&dy;|e2&gM?QMlbB{#s_P$4UWas}Na}&NM;5GH z_?Fa4VB(kEc5P!rr_>f!Od+)o4ynemNfO~8rx!09>(N8+PWIALu>)w~qJT80QjxBb zc2&4%dT&a{h7cc$=b)Iq359U{(^}WxHPd76F8Y^OIk%y*49=sXU#D znu7Aq{CcS3_#^^UeQ+9?)_Y87N?6e@WFt3 z*50XV&r!m&1n%BA8GKG>w{(sOjbH^$%aA27yCt_Ls9Cv#D%A%8Y2OTu)SA>PH&D^X z7U2fnD08Md)m?e>(8pUYX7Cr<`@`fuz5D)|>d6OpD z3O56x<8$WzF(o>JNN}QIWFw7 zVFgG-D4o3Z7?N;#G4#2E<){|vu!7#+5+6B#wn()9XYXRzN-0WZT&u|8nl75OnF;Q_ 
z^BN?4j#WgPS+mtH6i)R_f>IVyfnI}o#m1tAbrG>Iy6;hoosPMJ$GxL?A{hXcYJ0UD zJ03`)3g~QT(D^A$n|^7-BDn`5c)_wvMpoZsCI(A^gHC6KfMEB-IHP;$N3m*eRe-s) zDe~{lO09`1-KaBoHuiA?VwkvN{J3ya6J4#|!JYFpiaT|0i@kkLn+KfWTX*MFwpY{Vdk+DwJo`G42l z{oHE@_cZKIN`gq5Y+T|1bT(aNwU)EyMElUFet(~ssrnB4$-5Xyx94R`Dw4^-qcTsp zK|~Auj6)2)^Ff_>Nu!@dBrG;f|AlnR-gW{<`)IH!&@23^u!rDpMLg~(9#IqKo2bQN zmb4CCH}FP?zD0~C-rhqsn8B$zT}lJ5aLZ}* z00Sej%Q;RzZ&ln?^aZ&%bqc9a5;cChPIMzatZy{)MrBw8dDYjO%CDeTYi`nv+xWme z-rXkH?55%9uSX3tYfBPOWpdrTN8(9dX*ntL!xD31a&Dz29d)p7{d~2Oz0SOh zu$hC4`J3#sh>efO%bg9HFXA+hLQXId3jGl$Kt!&DO_7{**k3L`Ii@H#)vR1fVaAn|EL5elX?i+qWD${#K17w79Zyn-$yy%=iTb?8cV`1f05o$+C#zc_ zEKa4Y9UukD_Ev*14fuu3^7{}F0!YUPkf3BWuTzQjZ?O~PTdzz?mPDyUDUu*(3sIMf zOX^po)rzV;qJLl_-;NFrx_I!?a}Qd*tIOh!Ppx#MX9_ba=tcdll`V)pRa;Vn9((zv z?!zD6{o&ouyh`#fr8|IvCNIoOt-4z#`(g~01=?_o^23y&37nfh14}K)>C+LZlsZHO zX-69tvI~{_iFQw3g<83LCm>S8d&T)e)a?G#@3wkg_(ivsVI@S|+oc-#9!8a7Ho<`t zeCG^EJ^8fz-1qO0Wo8&tK2RwEP(>*-WD>lsy4JssXHtsaO)Sg@Cz>Wh2a@ZQTdT4{ z&7HF=m9|%Ps}iBPGJyUNAwZ$63OgRQ3213t2;xQ67m6ef6)A(GQag(>|9Es(gwC2! zmgAC18fut`Q3pI>Kvh7on^x$vW1JHWGWYy8{O2Wq_J*dF6b#3ashzn`bapQMIgzpQ zE0RXYo*ElpMQcsxUu%P&wqhQmdaIOnDgs3lf;CSNcGw7Qyg}j)+oGe|vlV;#Q`_A^ zKMwKk-FLfhoBE6D21ovArgJ<`D-JovP&Ts;qmrL|96!nhn$+QtsC;yf+7|oygCVux zxQ$*+zZ7<;dG;{7%YMJx^%qYuY#$8)Vf z)A(cP-4>y!Y(fJAHeLGkTz932vbDwm%~WfnRKs#6s~VhkY-iyq|B3G@9cej7>Zj_v zXuil%VUc)ats~N`Ec^L65~vk&g8a%7vj8OE2(W;O56D8V`K+D?ypazPof^66}nx#2blsX1V)zzgzXnuMk)aMU;h3`Zz*#nxh#%KsD94~i_hx!I`$XE(iF>TOTdovTHX-peFILT7Kq4Q$2`2BaK9 zy*!p2u1)k4VVc+uy=oqK#oGc+32#xsw9f_G!y1YeDvXI5FV>zAgr!uZy`c!e$!m#n zmBR)fwU!-EVhw=EGBu21lU8y4oN^G>S!y3}TD;6t^n}2T=n1O@W+@S>)EjFxJ9wlM z_^4X31Xwe$clYl5yg0d*F8n39bx8nPwPtbQ6w^x*ehSda=c~*ngg;K_0pi)hC1qTU z|7W|J3sdB>Y<#S5^At!S@U_`F>x)Y*E9#l8868hm~v?x3;_Xd+mb)BM!4nrY!ZYe zO4RMe4_Kskf91Uq8&N5rNxh*gy{tViV|F{gQHIrq`!>%vgMT$Yn^0;AP1Aw<@P7lZ z4ORqck+=IP-6mrxfayN{)UTmkIntaQMKNXXgE2u$5Lh_FMST#hgBtNb!jG=t*v71Z zT_+{l1yifLL4^^aJtZ0A1t=gzKfJe@3}I9~M{EWeB7nxlNV}nOSgDWT5E7BtUMBKE zx3k{suLnJ#(s(c8I{u1p07J!l z9}xzMjZ!jP;ZGFyp+rcQOiT3e`T3-iPjCGWgjI;N%qq|+0HDP|>~ztH7tIxK6o4w; z*eWhPEQ4DiC19nbH!&SYnk1;Py3f3Ov!)E2(GbDwM=G(GIV==Y3W)7Px|(|7=u6xz zdqA!I0zt1>xv-g>v{xe~_Hx|1FnIyEW4GN2AyCtscVcx0z1#5j#3KnLgEUC{g5H$a z+Gcn->pni?g$|K|QZarafw9lSMNujQK(Lqw*XQNPd`bvMg6Oui#%vhS{c%1Q-gqGk#Lo%;7_Fb_fqED^W>;ncGlO$P`;@*Q9of+TA?F zMm_R@Qd31b(W_ASqYMw@4wb!1)}!3bku9D0{zpL#Xo1?**ti59$y<7p2nsGJmV}T0 zx+J5B=KZrT4ebzfogguD3`QZ@7{Ia_=dTSXbNv5kDV-`+~V_~J#pRA z;Ig5LIW)fxZwqlsoYRYK2GUX!bX+3MY>1O@sN32|n+t(UWMXgfh3mBb5pGT|D-wmc zi}QXX^C($L)z37#2p0OHTk|WC-_@+@s}*`x?#x7;9oGwm^V5n!Q5Cr~>ROw@WR^l4 zL{cL^MHJ+SdawQxk=vk0}^EWf#)5~Ziz}_>cbm@;p3q`u8nezpRBkdN#SY2TbeRO>p^`%~x zGD3wx^T1FqY&sTp&{2O?Gow2s(as9P1&%#l-;5frM$#JprWw(}5b7B<&*zIF$o_h* z-|c?aR{Ne3B06JL+j!U92!)U&ocvT`k~iL^W>|TIQHz_sj3q95F%W%^FAHH<(~vx> z-i2T7zVWaAmA~A)i0Z`92kwNXLvH&1mhg~S>%NGA-h)9@g{3jn%%d%z%z`8~<3nIe z{7CwvCG$Q8u8`u6$pD;R32$q{4YqBLnmjTE^h4IYP$`!df1#69E;e1IhHz+(C5 z-D9DUeF_>LgWnauko_fSKw=t+wxvLoz4}>MrMwq28EJ%e$Faog7<1)37d^my1d6$x z;{f_7Km5_%&%1y8KP+yKNXayQFYi;=gIK7JNS0yoR|h9txv9K1KZe5w+Rw18_})HN zmULk8?C9ntyixP)f|YrEw|Zc*3BXuA8Va{+fz28~L4tf7WT+F6x}W=$u}&KBZYg3y z8xY*2p%;7K2gLBfR*%>Wc7JGgL3J3EDSC*`)!6KI=pchoc(HmouB9}BUo;9j`BC=s zcH(OQ*PS4#O$GehEUD(Pv)fQPzux_JJioO|q^QBN{Q$L3O!T!+oJ|M^xMX0iz{*%L zZ%Ul!n&2mTV3sI&qFLk2uhO~WNt~nF8cTt^u_j9euL`a-?EVe^bo!SU)8j@SpKDH8 zkVMgda-Vm)^=uA+jhZrzM!hW#x28^P8p!tqlB(E#&3CWI&igLbE_ipGyOSVpYVW1c}a)DM>*U zfaP%rqAS08iD}WLH%stUX;MX=Vt?{TJfH^mz&*ph+x=F&SsR_C*yO^sP?1|a zB&sLniIv1Vu%r3Z+ID-d+t@$c?Hz6J@5M%CQjhZ{wN@PY6C#1Uve~;iDS`<#$s0$_ zLMPeNy}@%8BlB4tU_xJ{36Gsx|0tBF+X@M(D~!3zNABKzM%(C*gD7-N{UrE9&6}~e 
zrb;e)H{8{?zCW#5*OFHtg#8&9ieekAQG%0M*Io-)m_JFES{G@GiH@P%&=rh)WNt~K zG;{)HPP$}F=DomPsC~LHCfgm1eQlzdgJL8ezh)D^9LtkwScj^;F%-y6qYO*H1cmRl z_b}&*-uME+XqKb>At%u?(Lp^ddVp{KK4#E$(OQUl`tsFo+xksQZ=ZNw^i>!u!Y-pE zB@+z1IIG&uU}uCU$^|SL=9FMBi=Bd+Py_)vzLa8^VYM|m8_QoPta3fOv8q|)&zfrf zq=sl07?M$mBQN?{Hkz2r;QH?DQUTYsgi8O@?%iK&=KbIO(Kq=BUJ&}CqyoMYz%95~ zSy!PaJ?H0Wxgi_Hq-1iI9t4;tV=) zvm+HZhK~zG(9DLoHhm%`#E?_;a#t1VB8gh`!a*}%ED_bi2f6+=8+0-EZAomv=%kdF z5+X_+lE^IjK#)oVQe5iO?kbZ|q&hU+BhD0Yj%KSqf$F4-m!XDrjK9Ljy|ny2c{8IA zT`R|8EwORRnYD*<9P14e6U(5r*3j4UTICzmxTF21si1)#%hn5016S8=jm0k9MzXXe zGBq1fpVLA?89PCnY6Q8LwaY}h3f<%qD;7H|dYMvUj)*1vL*q{x_r)W&_V22XT#{rQGk+7gP!-Hf*9PCrlmUZeV z*%!@Cm6nL}B_cxKJkSHpgAXC4h`{$_#TvndK&69NTnKdxDCs*^UodH_q&?x_c+>}~ z!G2cGQhDEH;Vd628ChehYsZT8=Y#Bs$Aut_O-heFN=yz}eE6_Pjybbb01YjJC{H-% z(`A_G4wlS#U`E{^GC87KB@yX!+Q3S2m%}ZJF6y+2LmHh4`#!!biS1pq@_1NoD%1fi z5WAuO20;O8(1IJeSzZIjB_Rv1#AYQzN;g!}u|CMT1TgDch6fP@(bgbJCGIRCBgo4;Bj-q;Ddr_YQ7tqtk$xf#8G z@umZiMI(J5ZR6w*jPktbDMm~uo(G%JtUu^XuKw=laUu$@O1|dL^m!LnGkzY`={arHG~Y>5Enn{jt=@} z!X3ru{b#TRnEr7J2sf@&oPyeGlqdBSOetz{r?rI3d6JeC~H`akYa0s%^vw_K#o4(2AGzIZ1C*UKaAMSTtMA4@~(>$w1Lo=s3tM-gWDKcX@ zDxcn#_!YX9tFW@J5Z1bvL^Kn990XFqM?FhQHke`Vvn0m{U@3)+bmE|lgrKJc+MjDf z*xRpwLAb&}SMuqf#D72C?bf|xpg&z^;$q%NtW1?wgM_(oS&8DuSCvL674-|$MJec= zGXtY#BQP%vFOX1np)F6OG&h5%n3dyMgC`km^x+Ta?e7ygB)lh(U#NZD)%fF@8cj6s zI(|d(A5I}6xmTzE2^%3A`ytZ7CUTfZr<> zAMo8+-tTq)I^M$AJK_4O$|6D3`so*^6)XrT+u9SDB3!ry;hDw^78D!xVu={=a`mtC zxzU0rOVP&N*3&h|&`z1Ti}n=+!dxrZM;lw`4Ju=ImVLx&hitMc5U|W{D)6hof$!1< zzD#04h@a!+G_a!=j@JbkK$j(W^U;&;VbJcQ?l=8WKBKHV&+r0UU$;UPIuU#v6i}q% zDzmdB9k3VEcSCS@<$f`_k-$*VwVRrQWQZFUmPSmx|5=CuqC-zUj~_n#`|j??@dJ6l zVR1DjR`KGwh_SA>++G`EayyGz)Jo_o$P#mkF}@{QzIxv3bOu2$G+ohNM_75(75JR` z)HQHW1pvSk;U?=N+(v3XQoxM`>nL0SGd9o0+=FJ74gvey7SVGK`QOI^_2{23fJS-|CGPu!ubU%>vr27A&lPt zr2EK!-$OXmxX%1qtvlp!#`9Z|e#v-XJ?f$OdLAP$gAFJT_$;mspli!4<|>Se{63(l)<^kr zB!ahxdczu19wG}Xfz;l~C@&7CD=G6!|c!6D#;C|>>q8jGGee`ElbR@PZmM(MQ6uT{BR=&bfNFowQQ8KbvfLqeU zX&#be87Gdq-`$kc5ChlYR9iG*hW=b)JyJf3uRa`tU=Di-0`JUTM31nDf!QFR_NHK#4A<0zNz=g+VUKNv)Fn5#8I

lh1q+1kFRj*U>5~*l2)Jf?xaHW^kWcrdx=p zthGSgmHsMb79n!+i?t#d!Rc+WfC;PdnIfd9+vi|vb=%75oP-v1ta0g{U<|1Y4#tQU?C)CWrW0O2@w4B4oP*CGgXBy5~XM7IL^K zI)ZL@YJO69PPe#iLgLl$T~UnfWu3mbs9ADxH_ZSMK*(+%*DiZ)*?2z(tbSZx!8;5E%&WC*gmf{~!D7_(PPLob2tOOf))jujy0X#~rq4hO*pjLuaD_2M%Ir~z7(^X@zi&R2X>~y65J>(Y zi4m-A1gT zKVlYBJM7p;Z&hk6K1(aJwfGAhdDUg+UAB+o9NcOetz|=RvK6Il0yl8s*(b3GkAd_L z+JF+I%?2XLLB}>Px;Vacops9(n&7zLK*_3e_BT)XO-}(>?|ybcub^s?(l!!%ztD%= z5(axZiRvh_y&4pf$H0|^qNYQSY8^AP#1O+)IS+>&gfRjq8E2(X1-uwk{hW&)f5J{u z^2+xa*YN5pqnxLTX2kah4SeQRJS)=xxti#&q(5S|pp168E5IG&DF6mv`gP3FKs|=E zIPB8I0MLU{G<&Jb6v1PF{moJI;e9ayW5Gj;R^F7-9h@y;Q_kL$ioiZi6Oo=3hA7XH zcs7KN#Q>v<-OBx&j&PnVzSkWbT2JVFiAG?YdeJj#c^dS|A8XK* zX0n@ktUN?vn?5QKa@3ibP2! z3STVg>9ARh$rXZ7zKlIg=OtU3%L?1tQl}@DtPnh?d?2c=yLRkLn+E0CzF1D`5L;_B zO`ub&22!Ia6Og(#@K<9`6T?pe$^0{)wk&4A4?Z%|n@dguUKTeqNH-=mn7k&C5g`wM zchowq*5JOmVsP3sGQ_z4>B6=lI*;h>S`YgN4EvQv4)#_x?!NUP9weN>P zeQ&lGUi>MRnFLsDY(jq<+?=2tib9p~%aIs*2g7DQ8=yODz8^iL!ywJuuh>+^dxZUq zuSvrVimS5dv+M;U0p)7Mb8(L80#_O!@Rr*Q`wFir-)u?4(PkQ*f-Mz{axSYO9*A4U zIxou^4VAeY`9&N)dS1eo+n?Lv;@}|qkpyj)JK&*=h~cy4lO+2q-jmlzpjG3>AY`)w z>gb)-W_erz@ImOA#0SWwiWDSml_M!hP0UfOHK6@u;ODJz6-MHP6}s3jm_quNxprA*rtNHZS40~qJA8EdNL^JONEuMeDr zO?BYz6>7_OSPYxfcK&-k)D{+cP9xSjDp*2bpAJ-&d13h2~r z4nG-T@iZ;%Zyjiy^YMAf9e^6gU@|Ur`x%KvKeR_j%^LBhw1&aR)r4SV8DhNVbT|-| zR#cNw;K20#QDC9fOs@T5_aaEGgj)$w5aGcX!HRUanyfn3z&H;_vD>sBnfutb9gka6 zxrsS}XX)rol+6L~Mh?#(Q8#Mh#r_3@W;H!8o~Q{xU&K)Yz4<9Q?Lznom`kl7SJY>+ z3-8Q$V!N1LKr103!Rz19avY^TO%Zp5(eF0Xk3Q=j`gMpXJ)9zvpe7%t0Nw%xiB11t z5UR&nQ_q3WFOGo@S8>(?wX0+|ewGs~C~8Qw7hp;K$ZdQhlCQP*GBAacq@i4pCNfP! zfIPT)<@tStRUIlL+m{8H3Ivv$V;K8tm9<%%jg{}N;4~OSL|Oha$iBq0zU0;86pLFh z6rwv;)+9p>G7^;*WU#h?S4!vPCJ#-9*m zod%pRcyO44Sp@>7fXXZ6b}S%*+VS&-sEwSos3!rbIZEzisGf@QKGLC8)@;chljMbw zj>|@#cSofy4P9Z0cBaU|b5}iUu36#B^cjcg3$we4%fSkhRiPkqO??G+^Cjg zt#cDrkcPFRq9$UPO%|iRt23!_Lupri_>pQse;N;;x5+fch$LE;4iXm=@4Of)#7eMp zl9h<_NMYvZxz++@geJV`R=h0X=gG8X4C%xF)4l&`92mDtTD-)RHid+&Erk0A>&))! 
z?f8j@nh8a%LokWPbYI(6p%fD6%EDl?P5*>ha`A-*$__%1M+FG@7nKFBT8R*(F5;5v zKxAWHp58Gb9GeD5ta*Byp`{Q1XZQXuEhPv9bF|b3xH6a0Lw=P!gi*|gY}$z?=zOp7 zMB}AH0AK;1bZIPTd>562@y88jS?wJ8<+K1sL*q-=F&+$&%h)W>qBXp)3`ut)yd0;0 zMRJ~Txg5BEbs)TzB1b+K9i`-t-whq(M*Cw2Z6MpYv@JvTwpNa{k?`q&f@aQH>MleS z;Su)Az^ypQ1_|?;S{6d6G9M=G%%Jnack~jdGy>M^2I&+EV`XOQPPIvBE`NR%Z~}`)A)+AE*~nwb#lMCmf0@(l>9o47^a>VDT-qh@ZwL;h8( zeM7!w^pqP+!p0w<6+4A(kRxszbxIJUMzt0NBHZm_2lUuF8SR2IrYj>6_X{7xGtA1; zX{OoUpPO9RJ+SDkXj#%=*K+b=+mAo%9{X{-$fC(fqHSq|o|V=za+j{nI_vlOb8kwr z5w^W#dOUVnA%fgGiF6AHI2ZAN9;eE$>e(BZm1=>Kt-5dUm<^0nvut-}uk|vY9k&wI zV1+~3qj493$U%v+(M5r%FUaAk>_ zi%wth+SWG4F#}oIpy@M6gz_ay-~&5rIi}=!0@1ZM>q44N;$mUVlup{MFW2ncEQ=Z- zT+xsT*5-i72$s6An|X>CUjBU#gKDwvf=(Z&@WXzsy_|(MFGH;WekAMB#!!JA2lup; z!D5&gH?oAqD2fAxwU{j&Waaps(j+Nq!KZfmgEZ)lpqtl9 zfG&b$-NobO-rc+1AwSg@iU3FM2b`{k>ICiK{WScE7%BZ&E7}LASc>S(ug|ZgOl6wP zlpcmudcJS*XY;&i)hWl}cWiJM zX6ODHYY>qfSBKLSZqt8OIhqR>I1k5!Nh?f*BW3^bb>$xkVS5Erx=}K^4yN=n>9wNT zZ_5;AuLNqx5($}tgdR-i@ezTz?*4qyEc?%M^FX{`*bpIZEJ%kQQl!a%nj;X+n3`VV zCXdeikaF#{Kj;}TijD!T8E}KFVh>Z~Iq~`1bg`cb)Dr8&+r@om8N9DJpu1XF;!d}E zTRmE5Z}oyAlnX0+Qjv9tVL)SD`sF&?-l1hR*=&y1A`C<=HhMz7l&h*GD);6@hB4%3 zCKIVUTYggr@zs65XmT^gCpswfHs zh~u<)vuV69?Np*xgqJ<0u62)7D_<}3RDnVc&A$PXafZv3TSkD5)4hV6D0l-P8pQb^ zc#jK#7a6Cca`1@&AwFbM5?5HHlBls{6^B;eo{kO+Ad0WT{JO;{m(;S#!p6x0`xg+O>$!oG3083kMBE`oFd#T9rRj z4yea0$7w5%N=1J6?uj}b#M;Ie@}`o(m9=E4^;VI>T+C~+>?BGp%(s0y#vt#m#~Oh^ zj?!K|C0uGcY5qENw<02cLqh^;iK z6}c4kT#j$@6Wl*t|Kp;yiO3x6A0C+|Jqh4=JOz#)d&dfIIcxWEElQ`#Sn*&YjqxM` zL6i&@<$Tx%wqk_%CWCdsDk;KjrA-vOi;Jf zk)I)R)@QJ%j<53UgR{T$o$fo|iARG~;BFNTA^QI^L!mfvpZSbQRy8?ED5AKIAaDSA zBwKv;SyxK;kktxwU}VG#@4;@dn1-RSU>n{y2q|}qVgQ8{3v}R(A|2A`@ys@eWM1MF zup+|ixELA4_948#uu9B1dyYR@8c|M+Ql+6w&)@BJU#$U-76oo_eEWEkYEO$U1VsS-@ z=rK4nlMyWJH6n@bT6sG#VP$){_dnh&XfO9STHWcMmNu*OK*K_>xzDG2-D(`=kxXlI zqFSBSzTr(146aK@EBb^C5y1DuMB*_A1f+}Z+Z~W0on*9y;4arJZUPAJ4C}X{0?1q6 z-WBBw%3b^NezuYWyX)dD{1C7D9%!EjLR?8g*u??u@INufLR1+1TES6YbHt{;F-7LMci-3J!|(s&@Bg>{*ZsHtsK0qBJ&a$5h85l=_7b!{r?55w zGzSE-FaO2;3TUr<)g|*a-m?O{1lh?kAXNOG1Jcac3;x9-$DHxczVN-cQn8R;hZZeG z7ML?vwAvk8VJ-&G$3-BVnibf{H+z`G2*_GXFtwJRK~lB;P(n4f%4i1L3#y8sJ3?Gx zu)PDiwpeCfj!robBzLS96B}14o~6-?$uj^1Q91(A2Uilbe-g_;bT=@g=|aSGa@rLE zy{3jF%=*kHGWLdqC&-o(vg@k{HCAuDV|};l`R!{07y5J`Q@bWEcJgIekSH5toE|H%lJzd*sHFuUhAF}!L`0d7eM-Wd3&qaXXwdt zMH$$n{I2jX!56I)=0$>SDDq<~r7VX@2hc=Y%zyzYF?Du}V*<Lshk!HgYY2RXV@6iy|BQ5^>GAckC2ho(UT{ zbGhlKhxFFoI||MRxIl~{-ri_N-Xivgag}Kam00fdN_JhO*BatWfsUUg%1uRBW-L9naVrLPj~8fB^5Ivu3yd@lSmG&P6dBkDm{ z?yd>Z=(Kck)eE`Y)D;aj%Yr?^j48gLk$9@84Fa`f@WB|n$O2}5>1z%^Yw`-oE9)ta z?!zC_IQ{Og&3AZxt-34eqtXLUN@&_Ef#!5G!OD%3SXCcUTyyVl6)u}!8a#wK$NNtS zGg^3{#yJpWhH_Cb4Yhd?j6WL)~XuFhcT7F1>!vnMyWE;3NdWL)JJh^$jaZ&86A%{x7@t z|6nQ&@h;?Q`iiL?@l0vVtc^zm3zvyL6#j*gk;Gg-AlgOnKe zebb*J^Av-{n6fA$hgnnoL(c2MI+>r|Q0)YVl-dK02KZ^L>^50@2ziqf?NhTVy%BlWno*lG!V2Z&wc zy%@!?#>rnTIiK~qYP!rGhu$m#VCqNtQ;>*;jbqA|K%Y$erg%#vYleC9I#dIL%3DWAsguI%Nn4n@4g;p|q4Az6N zU$ad>$>9K2>84E5SvrIghy}S!$cUTXnt;^to6j{|iM!{G5=G~+7a8`$F+~s+jsTpe zTFWGxnlnAk_9eu37}}d|F@wP%Y8?irDS=Cqc?Evww%nA6_!gH>m8?`6gho`A1`f7~ z**l<0WP3H}ucFiIJPq{^axtYL`ralK05)m_r7 z-tWpRPH?B^BT!+8ixhthQF|1(gpi#A8<{3FTYWA1whd&@4MmZb9{p$lSz~n8FI)CKX6Sr{L*LXhEoXM zm^Q>&QC8G=bT`2X#Mv0j8}^EPsP})6 zm9*Mjpo;px{CnQRh$phOz&3oMQM80{oWeWC*j88Oa=U6AczGKGuk|U$vFTdI!hz zGHwF(gaAm?(j35w*E`~qC#pA_;qMm%px+fg`k`qc1r>_k#nd=d+z~qJ|xMo4HcDw{i-fI=)Y!+!Q*#2#~i_s#2AaWWnls&mV+KV5*k+tbPoLygX z^|Ef|mbpEd5SR3A->m%U1L8CrCl558hYx!0;FF5JOTnC3W%g&`Y|0Xwkuc6YC}h zjAJ+}F6pHRYF7Pyv0NruxS*uQ;z$=qU#t+3SIv%4h1cs+`v7kUi=^>h7VZpuuv#4s zJ6zf0p7RRCDZS0)1O~(cW4?GTF$=(s5Z$_3%Zf!zlzI_ce+A8=P!M$O-sKI&?RR|| 
zI|0&JJQR4Sx=iyq!m+@9M{f zk##t!wslrAYm_rc7>zN8;F43tc{(BH8o!a(NX^>fP`B)b%gfRWp*KixxE~k$*Yvjw z44DwtF{ut2>tV7Wu2u*L8N;CrsEkstBAhG1+kmh>AWLK^*Sy;S%1-cf*~S5lVgOP@ z{ee<}xW!LcuUs0jLd2MJjV09s+6 zc}eU84(nNNikJ%-w?RV4{EW}3T4**zR7xhb6pG=?-B3b2jk40{QJzcVS)a$UfB5k~ z{kY33P9+`{;bDOK%zeURIgoBH8TC@dFhq}e0Y+8DroBcM`rqI{6o zma@L4PvrsZ;$c9z_t;iAl_j-r_pmPxT@=R%({jJ?Y-h)EIZ<9QF6g9a^i;*FIK*tN z+8XyC(bf5g71Ud5-sd$*(BJMw^PEP8oOW+?5~@-ZMM}G@d{tCo`g>d*ik$H~ zi!_RbLHjc@?6|84@Wr%rrC_J~gsN8-VH&I1cBs|Cy97tJNg69$%bhDGY8oI%++i9- zl+vg5{+WPDu?%yXmw6Oa3Eq7cqA!|A9^FmZ8wFPy-4N=^EPp{Ppp_wY6i?lce^Z^M z^i)|IqetRTOQ6nPCFoDchsVMb&(ew%{yjb{Tr)D}Fpt=MvJPs~J#t&GGj)Hv70J_i zw-5?aATZ$f0x3){vSR}vV3KqgO5s5XAev?_lE!xH7rY=V2yegK^lwha zu+w$1$ER-PDyji)$f&~ScX>11QBfimiGX23#)E_I6;JY^dnwIcL><@xPsVfGB){_@ z*}JF`0WcRuh$-c9h{8dxs*y=0C0Kg6s{(_)BbzwPg+Zgxiy=Eqo|TaWm`5}Vt#nT$ zHx=X~7a)e4kua%)Y2XSsNa@Sf%& z6Sxf`AD^Z?l(Of2-~pToM`&}C2>Yhk*RWlm@bU?2iz(#9M5mEKfUV zSUoY{c_|RGdx(}%2veP6W79e74ZUI4;sD81?(moWJdUKYs$0=3p8-^KuH4gZeK+=0 z-D;W%UHnHrt8>hgeb3-B>lh&@o=#w~2ljAS5eyK2M1fiemYlev5sup#_lGb^1Z_## z`GuqoaV<{@ljxy-XUs_)wF*k0Jg-YdmnKSFxJ3_;fN&ZoRcSLUQ#@>q`>{e|y1NnR zSY>us0sOT7TLw$5l10^#!Z|`I3ni$hK67E*Hx&fts1?vHLc}*;Dd~^WNZz3lQ(H`T z*aGm@#-EJOV?vY-p7qt}lSDF^0ex?l~g}jUt8O_ddlBO?+HVLwc-2qR+BeI;HIpo;ZcWt|S!hY;UYn z@_x7-`cZYk6J}?;8e6)NAAa%vKi~h7fbsi33-uGfdQTMjZ~FMC7nu6XbPcY~##dlg z$_KYR!d+)@wQ}PopP6NZr6*L(6PGkEe;Gx6&rX7Lfb3nBv-8u(bL5vR;@@7PDQ9MlQa~VpS8pp~#_!vxfx|!%-cD zKlh)2|6hl+{95<>ek=hE%v~-&e|EduNl09(pO(np1+{)$$XFMBwCM76(RsZwzmBE z;lJ+Vk7LFU-zCE-Z?qjnz{|G7vP7$l3*Y7Xp?4$EI3HDNkxmo7qd-HvL71jUP8fs! z)xJ^}kjzvuED_;}0>1X`h1g`9xdXY~wXFh(rZ6o`1>a!}27NtkHAmv|rb+XzA2aGJ zn)ny+Mlm2VmV>s#7c9!3QdysPSyxz8op^tr=p}68Yaqx_Al8Ap7i~`%;@nV32i$vK8*1}W*SB;aV#HB=g_aJ|H_O0kNS3b*E zb{aGctd{HrpDN109bP2|ZlxPFUpv<5SkMAx?LfOi{KzT&kErZeBe+;4^r#$8qFgE9 zjo|k>o`#dBD_0g%>{?$gF^1EZv#3Q@4k>*m>5YX3T*VH-30-t2)-l1kDqvE8gq> zAyRe741fO<<%hrjNy`t9A3T?5l#b7-0V=R%#q!@_{fXEg&Wjt$-j_RM*z?4!lZ1jm z!3`=>-_dD+MeMX^00C zrIlIb`<(a^|A&IX6waZ;mf*}_eU$;Ig@%q*q1!G&saOQZWbIaC9HoPQk0EF~G7kok3iVanUD4``{UsUyQ`lK74EK_)z(jnkAR9`R$ti77e)1%b~2L zyRa+3j!7G0NNb@_tpcDOVW9f-fH)Pa;{p@lNYG^nv~0ILjsw;WEIWqNCow5T5TBNL zSq!o(KxTh<4<8U}Zr&&=EbruvJ4pAZ*{-A|7viADgVjUuKYP$1Y`5_fI-V+k-r!q# zj{B55FBbd;89(tT4)Uz5`}_idt!U;s2K#nFmJxNGk6C(tEk|*hC=Hem8EybzMal`a zbEt&5#3sO)edODuk&GSERk~(OXNoO6DN&Zm8E9Tz#%0l#m|Z+=aB6d+%br@dX2^t6?-NhtqSi}w>yfd!2Zn$OKa)tTV6}WtU+&lPFqS- z(f7nJiJ;e$3`uO3J_lPhEl%Eu2VRw0D+dZxHPU;C^eb5vMFz#-D`cA4IYOH!h^c8j z1ad5w1a??37romA8>}s4sha@U<|3IE@mH1O zC%3jp9wsFq!xRNImgHdz6+KY0? 
z1KiGh*g)?TSPe)}P<*++cPy%VZT@M*sE?BD^ccU63cAskjeK-W0Lnf4mg;Nsm(ac7 zu|5&NG!xlkY!;PeW32)^huz;g_F&M1x zz+<%KH8Mze6Z!Hs+)z?}9>-`Gb>&T0p7oN1TtIEiG(T$(8)#-hq{b;C zd<&@S!!^RC#bSjZ<%&1d@xhgpRTj(62|O>iDV&<|kq`7YtQLtHe($<4E6WfrBOAj1 zg^;NscULao)}QmHUcUI#X~S$Y7AVQyA6lruP)5XDv;z?n3{+i5ERKVsz`p8sHa)^+ zhMfKj5uCJ50yY@f= zBnX$Ds_6Y{&K#RO++E-4lnx!gkf^f-)hJMdqEZ|XJw??IG`r6O6j0bbp4&CtMVJhR zm3Lcqv=+=C8UcZ{7*DJZ+O<>|!4)AX7if^csF&#aWK68kWwkNp`!-v39l-zyhgHsB z>%WV1{yg6Ggwp%*Z)Cb09QcXg$WRtB8U5bLOp47F8;)U3dMW|2l${$8nJzvFnT}*( zVvYIK4)6`WU9f$J3!fX<#~3wDK>-3u3l16ZJGDUUJAhn*ad zY7c#a84#ydSu52JhZY0yp3Sm|l{jwBy+Yi>Z-Vni4olf-9!B(&8VPBGw8zC-uN6O| z)ypj1#6hmvH=Y&aWR9GuVvhm*al4iiMd*?@tMcPVZwl1fWWwZ5R;geympDJ2eZF~ASJ9ve zL0lD5yJaheN)d1+L$$Ld$?B|Jcyy+&UDcU5di*RgG)0Xd7$-hKWa{VL`@f96e@gx= zE&Y64Xf$wW86Zs#cl%5KhkyA1-N4$> z3U>NH{ku;hV9hyUKMG^l+IYKqDNtYxd(hLptWA-AkwD6r6)Q?a;LGll&truZHd%|| z-@m)%Mcn&J7LGksLA+5G*%8Y=mk(1_bW*<<$|Vyz_XOZvPDL_73Fqs{F#0rx{v4B% zHTH^c!JX{hUSpib=xOP3ix*W;K+gID7H83UDeZGRJ3ET6F|k&5F(#@wTn^#ayYGDz zL!dhMkI5^JN$ei>fskS}a%EPC_B9ue(?8#B5M!LZE-w4N%KkW=lWV_}>zQ7hP(4;j z2gPKON2xvoy9ZV%hhh!*?viAE5w4m-Oq!h!!WG? zIjVPBDr+gIx7dIjlFx#v$=j3V-pbA&W=Imvfsx5rtYjzSqgJ-hoAM)+NkK1`*thc{ z1ESW~Mh?0qNs#w3pl30jAH^s_oFPVHHR0^~Ge>80EOIW1_zdAF{Fvt(%W>rKSy28S zCvb$=D0xQ`NdV?3m4H@V!RGB-DXM)yexWWsGDL+t=ZH&Ew2^> zCSC+aPFL;tX#)#)n)+naJ>T(iwRF1k@nuXD5JhlI%|^euA`_C!D<1cihF2170ZmX? z6d|f4B^qVR_kY{mMk@xq4>~0J=hZFMk&GuZBLKb_9N^Le2`2t7^}ey<^?QJ%TB$HU zbHK42XFLQw@N-q8ZT~MGO(E-pzGp+6cM{{4;;+=E0BAg;@REPCj2P03A>*)*h+p}9 zlQ*fxXN8{M=PFb5HC{lizXFY5^+3K@jCd`}ib+~w^S6M;eHQk|(4|zg#iODu1$f(e zLbsDy_r`x!-T_ue-VVwL?cV>yTTL>Nt~>uijx;m_uO&L|`0$M<8m;N8bHBpzvKjQ@ z=LP;2Ff}Wr4nq0g<~*KA-D8K5p8elF*cz}w&=jawMM$vg^CFNYAlMx$Xni!2()Wa) z!bnwlNFQYY5E7|cITSDC1!1KYeDODRqfPLf{2_QC_?~#Dp?ztC(3N|TN)}YMSY&}8 zcJs^9-I}I+tW1txhZKIRh52#!_%r`R0hBM}U0-#h?XB1aj1HNOMFF6oa6wfbE51%p z{`9bhj_?aFi7r$mQxyt4Vk0g6Zcf6h+uJ7UGjf-KSr~LM&r$UCrD(Z*RBzCf;|= z-{z$r$!Ip|apgpBMl+ar=BcadLoOeIk)3YWn?E`M&JgF;k}og;HWeJVV9nj#2)a5BSjjB zKIdm7YnA7{S}Wk7H5biXKaFLEsm28SLD|Hm2F{2e@NGd^ zX`V8r(RYo!zEm0n7=+~00 z)WXazQc5%!!We?wt2R%1S&W6tg=TG(hGK-Yu}^MlOL?stb$5J>QiTNSX)O~%C+XP; zu8Q_v!AeH6vpyld2;QfU{H--uKKMg1wU%*E5Udd|sfiU|al4O;I)oBOpaRRQG+#eD z6j`n>@E>P*G!}R=F4)kF4ib%U6|cU3hkO4h?C9MtYYHN_N=Wj=)*(#-E5JmQj}n}5 zm4w8yq7fYEBSrJP!^*`PY{iq8x>Bix+xRWdF2Tl!Vl_P28!SjpdM0Lrk4~)RA}?)F z@o^3D4mAv2syx}#11UQWDMim5#b4C%x;6q5{{tI{GM^SGy{~bZiXSr(06d9NUhqha zXztB;2bj`LO!}klSK@=bi(BHF4DI+JCp{CY&>&ju7#kY08>XQ)KrY8c*+8Kjg;K!X zS8yR-xYIq0zsM!)?*GcSGj0_l*q8Ymg1Cjfpt2bGVr4>BfRlQvr;EC8gn5O z*MEmmb^CYNMzOxoYQBkK5LN?%Rr*Z~G`r50;@eg(Ltp;<@ZVYl%Oj8I2<`S!fp3L*jrT)*`}}i%QKexJL}0 zz*GCu(**;*I#mvy_n)~%O;2Bwq~Jr+MerAl-vit#Y}wkQ#8n&1Sx|+b3L>j!zeZp0 z5|yQ(a%j0-PK9hzFEfOe%hLlFWtcmoeBIo3&}FXUY&hMZ|~pf zx_=U@On}aXb;)I>_FGP+?#erB7#3Y0(_We zN)?RmF7f>PcW=849x9^uz_BgYIuRs{14zMTibH};anO}4Y&j4&ReNFrY6EXdN_FdP z*eGOP^M?xZsU0?YXKpiH;q{CNEIw51++u@$eI{z#EvyW!2B%! 
zwgobCW`C?_b8!sRR=ge@YQ=H6)UJ%=_+nYdKHR_ioF)O0vjrpYA{+AvW}r5Ow{!=; z{j8>ugQy5PZtlIu3GO}GvkuYWls|{S#O_|Gz#eFX*Al)kbmEI7f6-*OE8da{_ArnA znzgw(Srxe@76b(Hs#HJ4h#}5W3GWIxmDN^3H6AEQ$ zp+D?OF=D(}D#&8xDpx&LV?0qYk#uonTOts#_#%CbOZ;dbULyMFtdx}>ws3MZ-i^Iz zT_~vxd|}R_h%uy?vHcj?@dCDxSU~&)pniFC=dU337*DE8f~t(YQQ;*{RN5m)oiR3X z0xs#KwjY%aTb zdm198V9{i%I9sXB#29J?z|Aq&MGgEfOBuxzgtOfLLXgu>-~VZ>4#k!L=)4Tzhp%L+jD=L@ywY=DcuUyX4ikWbw!GvSw{2kg4QvHvm?Zy!mmt)Aw*w z+Fd#;=U|&E&LG30!qhpajJ@~rC6@D<8Es$#r~*rMb3+#*`;+VLwY4oK{`I__bx19C z*kpnyH57TA6rb;<_Mx7f)hp>Zm8cGp`?&k?%lH4~{eSW>F#^y><(Q0MS_#y;E?QX^ z;Pg1N31EG=us$eG-s+;p`#L0P85Xid1%eh0Oc%H?x&tda-t>gyL1Z$C62=?vIkZfxDSKI+km}+I?0vD})eTViqq+^9QhbnC?ucn^WAvVhE zV(Sp)tof*iZ~D?}weiJXx4j#B{6{q#xwl$H&v&M7Ul8#;T@-$95w(kP$$s0F!8Hx} zmynNZ(mAJuR3IJpvgaHw-AiTpAj}XjolfjT6Sqa#NlEkFQnp~}P&awng!k{h3->L_ zZ_{I9DC|sm&K0$>@oXte17Z2ECRxeMZp;1h(`OXXzq-I|0rGnZlf@A3S?|$z$ zy59>s@8gfV-?C_mI|OP~aL(oJZrz7qpvx0+MtgW0A-*`bs?p5j;WS^`^kJ3UGue=- zZ>$Q48s4SOUBXg-GcAVtQ!E>DH)1~qr1ns*u8=@*VnwPkT%g1dzn7jeNPAxLD!>0( zVIg6Cfj_nbSa=7V#{=7A#6pN8AD;P;;rL^kz;-Pl>cQ91HvB^W5L>3&Y z$ilD{v!ia}BTJ-~g_g?&DQ&5p*n2Ev$#>Z3X+UK+mQ-xEit-A@oBZHqQ6EqjF_}3u zZAb2KCeXiJ@+B7tGCR|)Pu%QQcH;ZG3}(g97%K(VK}v9I8InK;qHj}bNA>}n5HEEjS7Nm%7(T*^jI|yC7{wqpfz_q> zN%d?iN_eAdtOR~ZzGU}lxV)eW0HAzb%OQi1_m8p1A9ueKQrddg|I?3Sc943ieLW!_ zXufuXJU3$F4k^GTf6a%Ttlm_zXEa8}JBUfBmDt5`WXZQ2byoSN4O&=5eowh|goM5N zBfRf%A&1^VL!*~QdWD#%9Bu0uIs%bzSf018JHr+rnu0_MF1*FOHFGe zWj-I4ptdTzfVIWcnypuy2r&TIl4*(eh{F83EtTgb9qgr+jZvY?HPoceWg_g9h22|= zO#{t3#EgwiQ@}j6w2EBF8_1=fy;jpzFA!fn1L}@#lzqTeOTJY+ebhj%!iz*NDVr^j-3h zX<&xAy!uOSln*s2(c7)8i?SvH4Htn{S>_^~--W=a^= zZ4%>@yPvj|Oe7I;eC$;R8Ajp!+%nh{Co(Y=!_J>%9E7gf^w>A+&J4~Pz) zOO{})588%$O7=&Id$Z3ADis@;T3Rs#<+6^GJdXXgaA0@la1S0dP4OjIK0cOWER&9V z6m8@S4@aias`y;2f#3qbmj^O{mwtnahZIaY&m0LXxoNKLCYY`6n-CxB`#iGvH0CrY zQD0M`?DCx01r&%Ynp5UDIc`FkSg|5LF7GC{u>g^MKDM`XV||l`S}V)(T_n8P?NsYy z)C?4iH5Ec*puCz}A;|1{Qra=yoZzLMkVI3%rd*5)$BxgevyZtLxzzLm-H^4X>M^~Z zFv@V8yrj@lCWkmCX$Y{w4O}UjrL`0$iIWPJ#aq~Axv2}|Q$X?1TH=+6GlB1tCMjd5 zJoQ=&kIa$vUDka_swEmC(?alikAdA{Dd8BLU&aq8=V`*WkTuwrz8j>sf^t+U=as{5 z0GZ^^$DmaW&V5$;S1KTSp$3#FD@!?LNMa`lvisuYGtPf-ycDprZ?dMlFF7#Ru=F%_=@7K7?uCMKe4MhIMH zpx{lEjXR*A!}-{clrzZb<&I0s`*k-D*%`8DiW2$r^tmQr-R>P9vD>EcSvimDfZ7Nk zx#ga|<5@5w&wd zQq%0D7>Eyji|T&pW^Z)>yhKPFw_f_D4Do=N2H~HyQWeauIX;asa!&Q2IF}eERf`Gc z=*`|(_m-!X`uYA{@4cs8iTm$_sx7LTd31Yt9H3y|aKHetmJ1-~~azoM# zPd+5`M9?7QzX22|XRj*fVVo8i2scA_Ld1hX@6$`YTQwd9y|Lw##Tcb~_nyYbF{XPnHhuw|(dXHCr;zHFjmqR>xp!Gz z89%YyvRkIJA{c|~?szdDUB)>Fy&Ae|B8l`>?mnFWcufq3c*{18i}${ zu89)V3ef^`5eINHPZ`?rZakD2p-_!!(ahw!Tvew6R$ylCD-Vi705Nm*_yM&o>7LYm zxXe3S9!=(KFnkVsrUW(~qSL}&fsIV^=#j2p86Wgot+eQ6De0T^ZART;(clQCu=Elj zTvvSfs;G4KN{$o;i&qlFOA&~oh_+C&WumJ%#?!0}#rTSnBDlt~*ZOVHUz`6K%PhLI z)L1E*w$?o_nT%=<24%MB9+P@jA?{V+Sf3?_A%T#$%r|6@?ZAvohj5#saj{uo;|PBh z7SI|}J1rPwn{UJo{s-M}#23Nb6>1p!jsi;NAUAbbBk6=1vQ?I<;LG5pmQtxZfC$Xa zuTKl!g(^}Lgc5!m$b&L<Y#qk zKX)Y?lM4SRk2l^*MnPM7r7BNXIL}-vDZ?xl?H%iCQkh1PJD+s=0HU97GK@B*qTfku z#jEaB9NA@&mS@NFJd9<1MEXgLWm@zD49Yd zRJ7|nBk7ZFHFb-E>D~R$;v6*gKrK%|+^co`l-DVtrTPPt6i#J=w6hQ@jA3*p%gi4x1<3C zJ~=-Iyh~&1Ar2Z-*`?lTNDnt>%u1D}biwE1wZP(h-@747jndPL#rzU4MbQMGkq9~{ zL>cSe+}L^uo+=>@en(s$*aVevL+PJp^u%mi`Wr@<#S>CtLxbW0N;ND7!4e%23FBsH zMjR?IeXD>GJE(f!HSi-znw)x-o|cf1~E88o9FKQIZ#f-D2 zl(=!~sb%u@8BwvvgrF(=HKrqQxYSSQAnuW%i!qwrr9b>{WH5Q2t-wP3$o;L908bBeid3?qW6x(Y zv*$%_hV=4`!EYktwE`Gd9XG8ybb35HIO;_4)12Z?OmesW-8Ww-pO=xi04XU~2ev?s zz%Ge0lscTeF%@%EhNI&>t)WmE2Nnjm)J=)2?ziYbtOXLk)2*z=CP_1AnV^x_uo*R2 zE>TmD`;1lR^d@&mhWjTLZS-+podEgwm!RFzRH+v2T9PT`F+sI4UZ~y1s)-@0MP*Vw 
z5X`=?4Y-9hcq?^+M1d<=hExvD&HqrU^RD>ST!$D(xkV}#N*eF%yd+2UeRUGNT9+0k zxlBfc6#^BzaS`~vC$larI%MaEhPC!QKx#TNh^d*^!Obbd9#dyT+G^@8+AGM%8kEUNV1kuEA)~T>mXtDUYxU7P2 zJis*}T3(XrMKd%imH!8%OZfxDy);B8RBbcDg-aIQ0ZrF$cnT$BU@1dKN;RToO>4Y= z_xMqlIa#}4|Js;~G-nY2MH-44rXP7D7iqOa4L)D((%c|I+DLmD)4(+ohR_b79jA3j zL-BQ>qgU~7m9&fNexqBpqK6bmJWqy?lV8}-zSkvcSE!q}i*C{BmvNn`-gt>*1#2?z z4CUcGdE}Kt9A<^Duq=m`mE#!sc;Z*>@0W59)Uc8V8WT~bj>q$?J1eFOl|gu3;=^KL z7wcn_zm>s?6Np89y|4!;Ahp;eMK2a>t^1*61?Dk|8L-Riw?V(F=GEk+DWSo0L%%m2 zB+tXT0HT``Gt3PkBIJI zc{igV<{>Kt}u<;16HFQL0MVKbP)IuVW~Q?8*B5qa5P zcJF`c(OmNZh3$QqB^4CcSHfg5R94kkE5vxH_{g|;KnN>mIkU5lvAK-KF7eHR0yA8U z7pz#jJ}=`Js094m7WZAcaOGBQ)Ae>^#baJ+8P}B_ z3Z$(lM8S1K0@WZ?A{XfQ&IMpp(w0a{u3s?{+;eSp7tdn!uV+qu;grPNfF3Pl^a++wo(Lz zF*q?*{Y0n_G1k_52Yf+sA%=w7B>PD#NbZ{$ zuaK$DUH=4YaTGssWl*6ehxpU({a=(xU~2&!)`~@>_czj!wWLlsFb<2r`HAVZ1ZBkW z=I+vmA2W>42(!#_L=&^`R0V*E6wigbNSzltC4r8{whi%Wi$g_z9$L{` zfg%FyMj3@7+JnXxvy?H6BSK3vp1N?;t)i*9dwkwbp(8-+%bS=515xw|S{6P(E);XgU~(_Vt*XLH9lt`7Tf&_ti=@P^29WCJD!pib7Bzy2UL8;& zx42$DoS0nWw_X$q0%B%Xmj3`}zc<`M23p2-v8p*rt*}&zpvZrH?Axgx_wT;$zVXSp z1Z2M`q)f_l^Bw#dzXay+ry*1ye?(a;#}wraNzFWPBIB`WWp;E@Y{n7wQbW*4jxBO{ z20>70Z?boZv=a~UN^oI9X*@je!UCJ z`z%@_di+28FJ=#Ni)rLzHiw=kB~f7;ar2b*cU{)~NDdN`VnFfty6;=Wsl8h++YRt*Bk!%kX$ zKHT%0R#`!Y?41LFo)n$8PE>|+KMUG=1q`58!RKe3y!1$DNZoOf9_V@nX9P$+-}flM4{!1)^*J7r$vCIOwQjJ3{ou*q{o`3*AHHFc zEcFE8FeRu|l0Jeyb!&&~{+;4xaHnIe>g)6uWtGJcV;l0t&75}*Vq;&v4%u817+74^ zhpZ~;1>av6WAUtiS;mX zQNNt~kpJo))_)hpmRCUlJUs zgO1#FUviWLm@pLx)2=elb90}628!A zr^OxK1&e_6ZAn$B4sPzs-#8ezEWgUvU^BU^QrQ8BK!wc5Zo<@>o{p~)i`0fnyrm$# z>PtYH7*FZxfLmL;EZ-wpX-SEnay`rB z*86u{jOkpEm;|pf7F}7m>5ebQu{$YIqhL5kX#P8aZX-1csSSt?fVJd~^3IdE%#3-48qg7Y}2QwTg-1y>M0xOe>7*pKn zo_pg+mfDJOuo`Helmlw20J8f&7o(iLrFB$7_43bb4yb7gDS#nUv;@gRB?3*WOjsfJ zjc&VWb<)P2jxW{B0+jVt9!t$|Y+<3>l0nU3f>GS~pgw@&*vv2U01{fDz@itr8KRiz z{>`YV=d*NDKm4ySUA+dI3d!`M9R~z-5*MX%e^sz&Ts+$?(GYAU!CWYcs+DKPHY>Rb zBX_X%se2GqS`*tC+8oOXgCxk!&q;>{JmlA>*bx2_6IvwwI3AzK1dRmlrEfLIGf;a*duPPyuS8pZUOLb<_o zRiN(9ZpunwWC-5nQ?aq{|G)13=W(2{N~XZ$3x*Z-B{(RBfpe?&+=m6E8?yR`h0Li; z8b!onk@{{Eaf0ywIdmq-AE+?9r$M_A@IDvI^wk%uVn@Vq>6e~xL-;kJ;hHyJ_A`L!G9fD01(W2{QhDT#it(%Usy z&$pV=937<)31;2ii>2p%A|0WaeYL7}K}{8Dfon=k>eR(*|A>HB%yc7)Z}^J92;jiO zG2%3P#&A-oX%$jMY3z8~=hUM0$J7>hmihGS-EL7PeGdRe9WN&|fXk1@i)YcurYK%u z8%B-97H;O=L-me$BXw(8w7O)M!Y1q8#olw;VjEy`DaQklJ=~qz8ax{vK|3GEufAAtG!F+*JDNwAC;b{7S+kJ3l z+f~P~G+()8r^E`Z1@|zyZd%l+Mm0EB3uVwg>poBFM1MJQPo2jw<)nq+# z>DLSuj_pI7Gu0|LKUY{RJ;A58os%S4Dr>CLY0DB+_5ImfI|LgEw!46REB;MH%7Z<#P43n>?6=z1fNa2{w94&o+!5xPOOJ1H<3j zMW>Zy@FAo-wUOE~8}JZeilZsoDw~qa$(H8Yj;H}%f~B+`&ydIvQzpsAT%+9lvCI%W zKC{G)=JaDL0SlFH1SKm0S|UeA;I%6;Hxk{b9^&>YL_4?QC3Ji$nsQaGd`VtBD+m@Q z4reRog~3uop(-Yirq;C>3xS{y*If^QgHa+m_~C`cDeV8KfDR{z=icMJmj2z&sAU23 z3-&rG&oQ3+cfa5LZcJ2g8^(GK)lSb&pd2f*X+1d-JW??>6?UBo$M*qVR*p=IUHW_L zKn3_x-jgM)d9opL>L32r60xD(^2R3q#TVq+=B!au+A5W7e1 zKR1y?0>&b|MA3QwZh0TAvT8ffR*_g{!e!MTd`1uE4))7n#}SmaF$K%- z&bHP7ES1Q2h5mO{-a{!cHDPKELZfO2#m?w-$T5qPFTAn#-$UMzx<=8la1tKB){$C( z7F71?I-wwh5m_zS+@*)0FbRCSSlISG!pf-tzxccYAzR$r+|va`*!!m2DIA&A6_nNP zV8h}|%LNXltdE#h<2Qa0KjBsv&tEW=^p|fvF`U>cS|QXNkco5wfFssuY%gho#WGX6 zdBd;0X)6i`7jm{D4P4FHGUs1+zw6h+nZh+`U|n&#Kshj*Lcieu2*tHb-Ed(mRj>qU z*|14|L8X}nqk>%sjFQ}o2_1SwO$jFsO{35%JQy3rqtN^X;AkZWD%Pr!2OgS6MN5J6 z+-W(g_(qe&&hZ{oPh&uG%~rGafYM=z>aRN7{G0!c|6*S-G*c}p+yX-){=_j#r$}T# zGFL+^)PkeSnz%c%)ZSL9sd_*G zLxeI!X@QTPB>ntS7D=KF0Jcgph`EVlmBM~_)C!EDYI6k2j==;nmY~J6vvst=82$>oMhV% z+CyEIFa)IafmIlXWX7#fYG$ra=-J1Cs0jdT7`G}K7AwnX=4L5<*nrIWLK*B4`+hGN87e39O^D-m_rg{--@K{`Bg@#Z#eskg*53fL(piE-j#i6IB%f;d 
z_9!#($_)xt*k=K50!Gt+pIsCuJz1{j>pN~k*Z;KHjWHDOSs16yiTt^8=XmA-mSuzW zTR|84QXkcQP%u~NG!wx89>lL5^dm%;TRRG^C{kIMh zeW>jzwz`i(otgy#i>V$O%Bg;#xs>oa; zq$Vq7SriMqOj$q{n5CB0b&QT=l(epwq$M_##&NNwCo>F3Oo3c4k!Ji0f0gh!hNzrk za*WYT6SOCMElj86sSAgsTz4z=YF%sMkWTlRbM%L#1&&;_aOZmAblx7L+{3qsU4o z=6R`J!Wo#R1(*=Nu~VdRs=uh(R`9Ci-RTC?-14SVTH=IIe73SdaM(y^FvbFQ&~FBPFZ)m_Y??G&RrV!41jOT(7huDS+Q%<$ucJy(KIJck=u?!6 z-yuzwI_P&WSDDd>*K{_gkxGKpC_Yy=mXr{~9?~~wr3byTV|)@)hgli)!py2e)AWcM z5-)Shf#-f3Ot-(b2LWJwmf-32Ldcg#VeKFwg58fdb;YC!x!@4tYqQ-IK!&kd-E6Ee zzpb>J((Lu)SQ+vacv}?+Lx7fxowrU6-EugVbv$D(++r5&I&ay$ZX zLrF;@mTvfjPG%ciq-Mq zR2lLj2>$+vpV!5MNk}KX^hV^n1Wz>zfW31#kH^DYNoV~>OPq$*o%7?l1tRA?ov#Awt+1^zSo;U?)b(uG&m>P^3BULih~VOdqQ8b{v`WYrE67osD-iQ4}x z#(Ojx@IsZEgw`ZjP@WT((GmVU`E~oq+IcNy6H8^h^bfiMMX*sx*^B(emV5i?9D~m06x6YOL%00j3#0;fPTd;A-C`Q1+YEGE%@M(osfR8n_#G8U{1?5K(;JtlG)8MwR`bvlA%>q z5Qz1f-kwLVytFo^Nn2k0*iu2_cdI8A3nPG7B|0IM%md?_%ZSs4){y2B4$C``@QF&$B(B(iyc+_3STMn@SWzJhe;xkhJw_G z%OMMrbRqp#tNUNN9i@B_!nCY|BdwgS23g~8x*j*qCc&Eu#(naTPE4g*Umk~7{s(3ZW^>ceiSL2(sx(PD-Og>VsoRbZJU1v;jee6qOl* zXe*Ejm}jx7^u^E-u8A{6SsvJ8Oh27`84ReBkjLErQ=tHUHd-x_Jb>81Cu5oJZl9>w zAl?a3U$lmg6c1fUxyMA*<~(1>cRlAa7$V~fSl+3~`66b2t($nA{Rl$%NTjlV>P z2}&1=U5qcWWKazcPD2>k@6`CPFn1^88isIBbs}%hMb!3z#KYDVZ1N7+U7z{gy*^=l zk7~@Ao^0`l)rgHWfAsJ|JS`fo_)Ufj0+QJEyc-w!8mo-u#ar0(=qo z{$=WA2ixRf&>bxiPvGx&27`E^93iplV`a$xKJui_!CH}Ih`I57#i=mfD#c&P#pfRF z+h2eEV<<`fwsI!3)T*olW~d&L$|+TU8praYK2h=h%)5y!%hnPV#FOn5kc7(4$n_jhIZumvYzd11adN|&ry%UhS<((0vpH>zE-p-Q z!PClTnZkMbx~Fvxy8DgAsR1ETK<<;pEq)NW;2EAJ>fu5?6{SGDFLc&nxTSJ9v@b_2 zYpJbfcj$!^P?5Vam&zI4Hmzf1ZAbQx1UD^wq4x4Q>}IZg;tBeHYw@OmB6xqh>Z0UeI?M5#a%!S;w|E! zzqF#J3M(fl1nZqq>$2i_sK!B9sL;xKQt3tV=Bs$9?$3N}12{}B!UZcsjwPq(CW5=- zrz29d6&uh!4$$zKhF1v>UN9YESCrEY%-^t~YK+#u*DIRk)wlaga|g zmktV-LM2w6K%~ujPY?g#p7(Q2YrqTC{?)_d Date: Thu, 28 Dec 2023 11:20:00 -0800 Subject: [PATCH 65/84] Fix OpenAI server sampling w.r.t. temp and seed (#4668) The default values for tfs_z and typical_p were being set to zero, which caused the token candidates array to get shrunk down to one element thus preventing any sampling. Note this only applies to OpenAI API compatible HTTP server requests. The solution is to use the default values that OpenAI documents, as well as ensuring we use the llama.cpp defaults for the rest. I've tested this change still ensures deterministic output by default. If a "temperature" greater than 0 is explicitly passed, then output is unique each time. If "seed" is specified in addition to "temperature" then the output becomes deterministic once more. See mozilla-Ocho/llamafile#117 See mozilla-Ocho/llamafile@9e4bf29 --- examples/server/server.cpp | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 72dfe452c..c5035e202 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -441,7 +441,6 @@ struct llama_client_slot } images.clear(); - // llama_set_rng_seed(ctx, params.seed); in batched the seed matter??????? 
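// (Illustrative aside, not part of the patch above.) The fix in this commit
// hinges on the server's json_value() helper: read a field from the request
// body if the client sent one, otherwise fall back to an explicitly chosen
// default. A minimal self-contained sketch of that defaulting idiom follows;
// the json_value_sketch() helper name is hypothetical, while the
// nlohmann::json calls ("json.hpp", as already used by server.cpp) are real.
#include <iostream>
#include <string>
#include "json.hpp"

using json = nlohmann::json;

template <typename T>
static T json_value_sketch(const json & body, const std::string & key, const T & def) {
    // Missing or null fields yield the caller-supplied default.
    return body.contains(key) && !body.at(key).is_null() ? body.at(key).get<T>() : def;
}

int main() {
    json body = json::parse(R"({"temperature": 0.7})");
    // Defaults of 0.0 for tfs_z/typical_p shrank the candidate list to a
    // single token; 1.0 means "disabled" for both, which is what the llama.cpp
    // sampling defaults restore here.
    double temperature = json_value_sketch(body, "temperature", 0.0);
    double tfs_z       = json_value_sketch(body, "tfs_z",       1.0);
    double typical_p   = json_value_sketch(body, "typical_p",   1.0);
    std::cout << temperature << " " << tfs_z << " " << typical_p << "\n";
    return 0;
}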
} bool has_budget(gpt_params &global_params) { @@ -921,6 +920,7 @@ struct llama_server_context llama_sampling_free(slot->ctx_sampling); } slot->ctx_sampling = llama_sampling_init(slot->sparams); + llama_set_rng_seed(ctx, slot->params.seed); slot->command = LOAD_PROMPT; all_slots_are_idle = false; @@ -1215,7 +1215,7 @@ struct llama_server_context {"n_ctx", slot.n_ctx}, {"model", params.model_alias}, {"seed", slot.params.seed}, - {"temp", slot.sparams.temp}, + {"temperature", slot.sparams.temp}, {"top_k", slot.sparams.top_k}, {"top_p", slot.sparams.top_p}, {"min_p", slot.sparams.min_p}, @@ -2437,26 +2437,33 @@ json oaicompat_completion_params_parse( llama_params["__oaicompat"] = true; // Map OpenAI parameters to llama.cpp parameters + // + // For parameters that are defined by the OpenAI documentation (e.g. + // temperature), we explicitly specify OpenAI's intended default; we + // need to do that because sometimes OpenAI disagrees with llama.cpp + // + // https://platform.openai.com/docs/api-reference/chat/create + llama_sampling_params default_sparams; llama_params["model"] = json_value(body, "model", std::string("uknown")); llama_params["prompt"] = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt' llama_params["cache_prompt"] = json_value(body, "cache_prompt", false); - llama_params["temperature"] = json_value(body, "temperature", 0.8); - llama_params["top_k"] = json_value(body, "top_k", 40); - llama_params["top_p"] = json_value(body, "top_p", 0.95); + llama_params["temperature"] = json_value(body, "temperature", 0.0); + llama_params["top_k"] = json_value(body, "top_k", default_sparams.top_k); + llama_params["top_p"] = json_value(body, "top_p", 1.0); llama_params["n_predict"] = json_value(body, "max_tokens", -1); llama_params["logit_bias"] = json_value(body, "logit_bias",json::object()); llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0); llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0); - llama_params["seed"] = json_value(body, "seed", 0); + llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED); llama_params["stream"] = json_value(body, "stream", false); - llama_params["mirostat"] = json_value(body, "mirostat", false); - llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", 0.0); - llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", 0.0); - llama_params["penalize_nl"] = json_value(body, "penalize_nl", false); - llama_params["typical_p"] = json_value(body, "typical_p", 0.0); + llama_params["mirostat"] = json_value(body, "mirostat", default_sparams.mirostat); + llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", default_sparams.mirostat_tau); + llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", default_sparams.mirostat_eta); + llama_params["penalize_nl"] = json_value(body, "penalize_nl", default_sparams.penalize_nl); + llama_params["typical_p"] = json_value(body, "typical_p", default_sparams.typical_p); llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", 0); llama_params["ignore_eos"] = json_value(body, "ignore_eos", false); - llama_params["tfs_z"] = json_value(body, "tfs_z", 0.0); + llama_params["tfs_z"] = json_value(body, "tfs_z", default_sparams.tfs_z); if (body.count("grammar") != 0) { llama_params["grammar"] = json_value(body, "grammar", json::object()); From ca38b8d334baa724bd6c9402470931d26427466f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 29 Dec 2023 14:41:36 +0200 Subject: [PATCH 66/84] scripts : do not sync 
commits from this repo --- scripts/sync-ggml-am.sh | 44 +++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/scripts/sync-ggml-am.sh b/scripts/sync-ggml-am.sh index 83abe3681..93aad88a7 100755 --- a/scripts/sync-ggml-am.sh +++ b/scripts/sync-ggml-am.sh @@ -26,22 +26,36 @@ echo "Syncing ggml changes since commit $lc" cd $SRC_GGML -git log --oneline $lc..HEAD +git log --oneline $lc..HEAD | grep -v "(llama/[0-9]*)" | cut -d' ' -f1 > $SRC_LLAMA/ggml-commits -git format-patch $lc --stdout -- \ - include/ggml/ggml*.h \ - src/ggml*.h \ - src/ggml*.c \ - src/ggml*.cpp \ - src/ggml*.m \ - src/ggml*.metal \ - src/ggml*.cu \ - tests/test-opt.cpp \ - tests/test-grad0.cpp \ - tests/test-quantize-fns.cpp \ - tests/test-quantize-perf.cpp \ - tests/test-backend-ops.cpp \ - > $SRC_LLAMA/ggml-src.patch +if [ ! -s $SRC_LLAMA/ggml-commits ]; then + rm -v $SRC_LLAMA/ggml-commits + echo "No new commits" + exit 0 +fi + +if [ -f $SRC_LLAMA/ggml-src.patch ]; then + rm -v $SRC_LLAMA/ggml-src.patch +fi + +while read c; do + git format-patch -k $c~1..$c --stdout -- \ + include/ggml/ggml*.h \ + src/ggml*.h \ + src/ggml*.c \ + src/ggml*.cpp \ + src/ggml*.m \ + src/ggml*.metal \ + src/ggml*.cu \ + tests/test-opt.cpp \ + tests/test-grad0.cpp \ + tests/test-quantize-fns.cpp \ + tests/test-quantize-perf.cpp \ + tests/test-backend-ops.cpp \ + >> $SRC_LLAMA/ggml-src.patch +done < $SRC_LLAMA/ggml-commits + +rm -v $SRC_LLAMA/ggml-commits # delete files if empty if [ ! -s $SRC_LLAMA/ggml-src.patch ]; then From afc8c192919f04613a92d40391bff4c8cd99856b Mon Sep 17 00:00:00 2001 From: bssrdf Date: Fri, 29 Dec 2023 03:32:31 -0500 Subject: [PATCH 67/84] ggml : fix some mul mat cases + add tests for src1 F16 (ggml/669) * fixed mul-mat error for old GPUs * style fixes * add mul mat src1 f16 test cases, fix more cases ggml-ci --------- Co-authored-by: bssrdf Co-authored-by: slaren --- ggml-backend.c | 8 +++- ggml-cuda.cu | 89 +++++++++++++++++++------------------- ggml.c | 2 +- tests/test-backend-ops.cpp | 14 +++--- 4 files changed, 60 insertions(+), 53 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 526ce732b..2c3752067 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -614,10 +614,14 @@ static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_c } static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { - return true; + switch (op->op) { + case GGML_OP_MUL_MAT: + return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type; + default: + return true; + } GGML_UNUSED(backend); - GGML_UNUSED(op); } static struct ggml_backend_i cpu_backend_i = { diff --git a/ggml-cuda.cu b/ggml-cuda.cu index abad9cc39..9a9effcf5 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7485,6 +7485,8 @@ static void ggml_cuda_op_dequantize_mul_mat_vec( const int64_t ne00 = src0->ne[0]; const int64_t row_diff = row_high - row_low; + GGML_ASSERT(src1->type == GGML_TYPE_F32); + // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics #ifdef GGML_CUDA_F16 cuda_pool_alloc src1_dfloat_a; @@ -7577,6 +7579,7 @@ static void ggml_cuda_op_mul_mat_cublas( const int compute_capability = g_device_caps[id].cc; if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { + //printf("this branch\n"); // convert src0 and 
src1 to fp16, multiply as fp16, convert dst to fp32 cuda_pool_alloc src0_as_f16; if (src0->type != GGML_TYPE_F16) { @@ -7614,9 +7617,9 @@ static void ggml_cuda_op_mul_mat_cublas( const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); - } - else { + } else { cuda_pool_alloc src0_ddq_as_f32; + cuda_pool_alloc src1_ddq_as_f32; if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); @@ -7624,7 +7627,15 @@ static void ggml_cuda_op_mul_mat_cublas( src0_ddq_as_f32.alloc(row_diff*ne00); to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); } + if (src1->type != GGML_TYPE_F32) { + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src1->type); + GGML_ASSERT(to_fp32_cuda != nullptr); + src1_ddq_as_f32.alloc(src1_ncols*ne10); + to_fp32_cuda(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream); + } + const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get(); + const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get(); const float alpha = 1.0f; const float beta = 0.0f; @@ -7633,9 +7644,9 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK( cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, - &alpha, src0_ddf_i, ne00, - src1_ddf_i, ne10, - &beta, dst_dd_i, ldc)); + &alpha, src0_ddf_i, ne00, + src1_ddf1_i, ne10, + &beta, dst_dd_i, ldc)); } (void) dst; @@ -8035,6 +8046,7 @@ static void ggml_cuda_op_mul_mat( GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT); GGML_ASSERT(src1->backend != GGML_BACKEND_GPU_SPLIT); + GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1)); GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0); @@ -8481,9 +8493,9 @@ static __global__ void k_compute_batched_ptrs( int64_t i03 = i13 / r3; int64_t i02 = i12 / r2; - ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; - ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2; - ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; + ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; + ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12 + i13*nb13; + ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; } static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -8492,28 +8504,10 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT); GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - const int64_t ne00 = src0->ne[0]; GGML_UNUSED(ne00); - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; + GGML_TENSOR_BINARY_OP_LOCALS - const int64_t nb01 = src0->nb[1]; - const int64_t nb02 = src0->nb[2]; GGML_UNUSED(nb02); - const int64_t nb03 = src0->nb[3]; GGML_UNUSED(nb03); - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int64_t nb11 = src1->nb[1]; - const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12); - const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13); - - const int64_t ne1 = ggml_nelements(src1); - const int64_t ne 
= ggml_nelements(dst); + const int64_t ne_dst = ggml_nelements(dst); ggml_cuda_set_device(g_main_device); cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; @@ -8522,7 +8516,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; void * src0_ddq = src0_extra->data_device[g_main_device]; - half * src0_as_f16 = (half *) src0_ddq; + half * src0_f16 = (half *) src0_ddq; ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; float * src1_ddf = (float *) src1_extra->data_device[g_main_device]; @@ -8531,11 +8525,15 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const float * dst_ddf = (float *) dst_extra->data_device[g_main_device]; // convert src1 to fp16 - const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); - GGML_ASSERT(to_fp16_cuda != nullptr); - - cuda_pool_alloc src1_as_f16(ne1); - to_fp16_cuda(src1_ddf, src1_as_f16.get(), ne1, main_stream); + cuda_pool_alloc src1_f16_alloc; + if (src1->type != GGML_TYPE_F16) { + const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); + const int64_t ne_src1 = ggml_nelements(src1); + src1_f16_alloc.alloc(ne_src1); + GGML_ASSERT(to_fp16_cuda != nullptr); + to_fp16_cuda(src1_ddf, src1_f16_alloc.get(), ne_src1, main_stream); + } + half * src1_f16 = src1->type == GGML_TYPE_F16 ? (half *) src1_ddf : src1_f16_alloc.get(); cuda_pool_alloc dst_f16; char * dst_t; @@ -8557,7 +8555,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const void * beta = &beta_f16; if (dst->op_params[0] == GGML_PREC_DEFAULT) { - dst_t = (char *) dst_f16.alloc(ne); + dst_t = (char *) dst_f16.alloc(ne_dst); nbd2 /= sizeof(float) / sizeof(half); nbd3 /= sizeof(float) / sizeof(half); @@ -8604,9 +8602,9 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA - (const char *) src1_as_f16.get(), CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB - beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC + alpha, (const char *) src0_f16, CUDA_R_16F, nb01/nb00, nb02/nb00, // strideA + (const char *) src1_f16, CUDA_R_16F, nb11/nb10, nb12/nb10, // strideB + beta, ( char *) dst_t, cu_data_type, ne01, nb2/nb0, // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -8619,12 +8617,13 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_as_f16, src1_as_f16.get(), dst_t, + src0_f16, src1_f16, dst_t, ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, - nb12, nb13, + src1->type == GGML_TYPE_F16 ? nb12 : nb12/2, + src1->type == GGML_TYPE_F16 ? 
nb13 : nb13/2, nbd2, nbd3, r2, r3); CUDA_CHECK(cudaGetLastError()); @@ -8632,8 +8631,8 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/nb00, + (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/nb10, beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01, ne23, cu_compute_type, @@ -8643,7 +8642,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const if (dst->op_params[0] == GGML_PREC_DEFAULT) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16.get(), dst_ddf, ne, main_stream); + to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); } } @@ -8682,13 +8681,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { + } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) { - if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) { + if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src1->type == GGML_TYPE_F32) { #ifdef GGML_CUDA_FORCE_DMMV const bool use_mul_mat_vec_q = false; #else diff --git a/ggml.c b/ggml.c index ed56e60a8..a9e1ea9b4 100644 --- a/ggml.c +++ b/ggml.c @@ -9687,7 +9687,7 @@ static void ggml_compute_forward_mul_mat( const size_t row_size = ggml_row_size(vec_dot_type, ne10); assert(params->wsize >= ne11*ne12*ne13*row_size); - assert(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index f3df8a8c6..b115299c0 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -350,13 +350,18 @@ struct test_case { fflush(stdout); // check if backends support op + bool supported = true; for (ggml_backend_t backend : {backend1, backend2}) { if (!ggml_backend_supports_op(backend, out)) { - printf("not supported\n"); - ggml_free(ctx); - return true; + printf("not supported [%s] ", ggml_backend_name(backend)); + supported = false; } } + if (!supported) { + printf("\n"); + ggml_free(ctx); + return true; + } // post-graph sentinel add_sentinel(ctx); @@ -1505,8 +1510,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op } for (ggml_type type_a : all_types) { - for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) { - // FIXME: CPU crashes on f16xf16 + for (ggml_type type_b : {GGML_TYPE_F32, 
GGML_TYPE_F16}) { test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1})); test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {1, 1})); test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {2, 1})); From 38b3de4658292582a8941a2be5c77b40ce6ac0f2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 29 Dec 2023 14:56:41 +0200 Subject: [PATCH 68/84] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 1ec144116..6ff2d5233 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -76e7f47b69e8334384dc718480c496dafbd47999 +168c43edd1f85ebdecd4c79262cacb32b74eda68 From 441f51dca004debf8b275f1bdc08e0f1af7fd8f8 Mon Sep 17 00:00:00 2001 From: Tamotsu Takahashi Date: Fri, 29 Dec 2023 19:23:27 +0900 Subject: [PATCH 69/84] ci : build with CLBlast + ggml-opencl use GGML_API (whisper/1576) * Build with CLBlast * Declare GGML_API After rebasing, examples/talk-llama failed: "D:\a\whisper.cpp\whisper.cpp\build\ALL_BUILD.vcxproj" (build target) (1) -> "D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj" (default target) (14) -> (Link target) -> llama.obj : error LNK2019: unresolved external symbol ggml_cl_free_data referenced in function "public: __cdecl llama_model::~llama_model(void)" (??1llama_model@@QEAA@XZ) [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj] llama.obj : error LNK2019: unresolved external symbol ggml_cl_transform_tensor referenced in function "public: void __cdecl llama_model_loader::load_all_data(struct ggml_context *,void (__cdecl*)(float,void *),void *,struct llama_mlock *)" (?load_all_data@llama_model_loader@@QEAAXPEAUggml_context@@P6AXMPEAX@Z1PEAUllama_mlock@@@Z) [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj] D:\a\whisper.cpp\whisper.cpp\build\bin\Release\talk-llama.exe : fatal error LNK1120: 2 unresolved externals [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj] --- ggml-opencl.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ggml-opencl.h b/ggml-opencl.h index a92b445c9..44d05bd64 100644 --- a/ggml-opencl.h +++ b/ggml-opencl.h @@ -6,19 +6,19 @@ extern "C" { #endif -void ggml_cl_init(void); +GGML_API void ggml_cl_init(void); -void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize); +GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); +GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); +GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); +GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize); -void * ggml_cl_host_malloc(size_t size); -void ggml_cl_host_free(void * ptr); +GGML_API void * 
ggml_cl_host_malloc(size_t size); +GGML_API void ggml_cl_host_free(void * ptr); -void ggml_cl_free_data(const struct ggml_tensor* tensor); +GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor); -void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor); +GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor); #ifdef __cplusplus } From c8255f8a6b2a3b3ebc6cb340cc2487f39fc95ffc Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 29 Dec 2023 15:12:35 +0200 Subject: [PATCH 70/84] scripts : print list of sync commits --- scripts/sync-ggml-am.sh | 1 + scripts/sync-ggml.last | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/sync-ggml-am.sh b/scripts/sync-ggml-am.sh index 93aad88a7..91478f177 100755 --- a/scripts/sync-ggml-am.sh +++ b/scripts/sync-ggml-am.sh @@ -26,6 +26,7 @@ echo "Syncing ggml changes since commit $lc" cd $SRC_GGML +git log --oneline $lc..HEAD git log --oneline $lc..HEAD | grep -v "(llama/[0-9]*)" | cut -d' ' -f1 > $SRC_LLAMA/ggml-commits if [ ! -s $SRC_LLAMA/ggml-commits ]; then diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 6ff2d5233..5b6a440f7 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -168c43edd1f85ebdecd4c79262cacb32b74eda68 +df098ea908764cba4a4889a1cbe7b026b2d31a14 From afd997ab6011dfefe9e917425b04ef4d83614841 Mon Sep 17 00:00:00 2001 From: Peter Sugihara Date: Fri, 29 Dec 2023 05:58:56 -0800 Subject: [PATCH 71/84] llama.swiftui : fix infinite loop, output timings, buff UI (#4674) * fix infinite loop * slight UI simplification, clearer UX * clearer UI text, add timings to completion log --- .../llama.cpp.swift/LibLlama.swift | 2 ++ .../llama.swiftui/Models/LlamaState.swift | 27 ++++++++++---- .../llama.swiftui/UI/ContentView.swift | 35 +++---------------- .../llama.swiftui/UI/DownloadButton.swift | 2 +- 4 files changed, 29 insertions(+), 37 deletions(-) diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 464fb3277..66244382f 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -1,5 +1,7 @@ import Foundation +// To use this in your own project, add llama.cpp as a swift package dependency +// and uncomment this import line. // import llama enum LlamaError: Error { diff --git a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift index 3393eb242..17cb5b9dd 100644 --- a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift +++ b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift @@ -4,6 +4,7 @@ import Foundation class LlamaState: ObservableObject { @Published var messageLog = "" @Published var cacheCleared = false + let NS_PER_S = 1_000_000_000.0 private var llamaContext: LlamaContext? private var defaultModelUrl: URL? { @@ -20,12 +21,12 @@ class LlamaState: ObservableObject { } func loadModel(modelUrl: URL?)
throws { - messageLog += "Loading model...\n" if let modelUrl { + messageLog += "Loading model...\n" llamaContext = try LlamaContext.create_context(path: modelUrl.path()) messageLog += "Loaded model \(modelUrl.lastPathComponent)\n" } else { - messageLog += "Could not locate model\n" + messageLog += "Load a model from the list below\n" } } @@ -34,15 +35,29 @@ class LlamaState: ObservableObject { return } + let t_start = DispatchTime.now().uptimeNanoseconds await llamaContext.completion_init(text: text) + let t_heat_end = DispatchTime.now().uptimeNanoseconds + let t_heat = Double(t_heat_end - t_start) / NS_PER_S + messageLog += "\(text)" - while await llamaContext.n_cur <= llamaContext.n_len { + while await llamaContext.n_cur < llamaContext.n_len { let result = await llamaContext.completion_loop() messageLog += "\(result)" } + + let t_end = DispatchTime.now().uptimeNanoseconds + let t_generation = Double(t_end - t_heat_end) / NS_PER_S + let tokens_per_second = Double(await llamaContext.n_len) / t_generation + await llamaContext.clear() - messageLog += "\n\ndone\n" + messageLog += """ + \n + Done + Heat up took \(t_heat)s + Generated \(tokens_per_second) t/s\n + """ } func bench() async { @@ -56,10 +71,10 @@ class LlamaState: ObservableObject { messageLog += await llamaContext.model_info() + "\n" let t_start = DispatchTime.now().uptimeNanoseconds - await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up + let _ = await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up let t_end = DispatchTime.now().uptimeNanoseconds - let t_heat = Double(t_end - t_start) / 1_000_000_000.0 + let t_heat = Double(t_end - t_start) / NS_PER_S messageLog += "Heat up time: \(t_heat) seconds, please wait...\n" // if more than 5 seconds, then we're probably running on a slow device diff --git a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift index c78f107b3..147e0c63b 100644 --- a/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift +++ b/examples/llama.swiftui/llama.swiftui/UI/ContentView.swift @@ -42,46 +42,27 @@ struct ContentView: View { Button("Send") { sendText() } - .padding(8) - .background(Color.blue) - .foregroundColor(.white) - .cornerRadius(8) Button("Bench") { bench() } - .padding(8) - .background(Color.blue) - .foregroundColor(.white) - .cornerRadius(8) Button("Clear") { clear() } - .padding(8) - .background(Color.blue) - .foregroundColor(.white) - .cornerRadius(8) Button("Copy") { UIPasteboard.general.string = llamaState.messageLog } - .padding(8) - .background(Color.blue) - .foregroundColor(.white) - .cornerRadius(8) - } + }.buttonStyle(.bordered) - VStack { + VStack(alignment: .leading) { DownloadButton( llamaState: llamaState, modelName: "TinyLlama-1.1B (Q4_0, 0.6 GiB)", modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true", filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf" ) - .font(.system(size: 12)) - .padding(.top, 4) - .frame(maxWidth: .infinity, alignment: .leading) DownloadButton( llamaState: llamaState, @@ -89,7 +70,6 @@ struct ContentView: View { modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q8_0.gguf?download=true", filename: "tinyllama-1.1b-1t-openorca.Q8_0.gguf" ) - .font(.system(size: 12)) DownloadButton( llamaState: llamaState, @@ -97,8 +77,6 @@ struct ContentView: View { modelUrl: 
"https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true", filename: "tinyllama-1.1b-f16.gguf" ) - .font(.system(size: 12)) - .frame(maxWidth: .infinity, alignment: .leading) DownloadButton( llamaState: llamaState, @@ -106,7 +84,6 @@ struct ContentView: View { modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf?download=true", filename: "phi-2-q4_0.gguf" ) - .font(.system(size: 12)) DownloadButton( llamaState: llamaState, @@ -114,8 +91,6 @@ struct ContentView: View { modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q8_0.gguf?download=true", filename: "phi-2-q8_0.gguf" ) - .font(.system(size: 12)) - .frame(maxWidth: .infinity, alignment: .leading) DownloadButton( llamaState: llamaState, @@ -123,15 +98,15 @@ struct ContentView: View { modelUrl: "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_0.gguf?download=true", filename: "mistral-7b-v0.1.Q4_0.gguf" ) - .font(.system(size: 12)) Button("Clear downloaded models") { ContentView.cleanupModelCaches() llamaState.cacheCleared = true } - .padding(8) - .font(.system(size: 12)) } + .padding(.top, 4) + .font(.system(size: 12)) + .frame(maxWidth: .infinity, alignment: .leading) } .padding() } diff --git a/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift b/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift index 4bd75cb69..c9f322ca1 100644 --- a/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift +++ b/examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift @@ -93,7 +93,7 @@ struct DownloadButton: View { print("Error: \(err.localizedDescription)") } }) { - Text("\(modelName) (Downloaded)") + Text("Load \(modelName)") } } else { Text("Unknown status") From 82d6eab224862a7044069fb9211dc4b29124264b Mon Sep 17 00:00:00 2001 From: andrijdavid Date: Fri, 29 Dec 2023 15:18:20 +0100 Subject: [PATCH 72/84] main-cmake-pkg : fix build issue (#4665) * Fix main-cmake-pkg compilation * Use glob to load common files * cmake : fix trailing whitespace --------- Co-authored-by: Georgi Gerganov --- examples/main-cmake-pkg/CMakeLists.txt | 27 ++++++-------------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt index cb00edbbb..deb77d588 100644 --- a/examples/main-cmake-pkg/CMakeLists.txt +++ b/examples/main-cmake-pkg/CMakeLists.txt @@ -7,28 +7,13 @@ find_package(Llama 0.0.1 REQUIRED) # Bake common functionality in with target. Because applications # using the relocatable Llama package should be outside of the # source tree, main-cmake-pkg pretends the dependencies are built-in. - set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common") -add_library(common OBJECT - ${_common_path}/common.h - ${_common_path}/common.cpp - ${_common_path}/console.h - ${_common_path}/console.cpp - ${_common_path}/grammar-parser.h - ${_common_path}/grammar-parser.cpp - ${_common_path}/sampling.h - ${_common_path}/sampling.cpp - ) - -# WARNING: because build-info.h is auto-generated, it will only -# be available after the user has built the llama.cpp sources. 
-# -configure_file(${_common_path}/../build-info.h - ${CMAKE_CURRENT_BINARY_DIR}/build-info.h - COPYONLY) - -target_include_directories(common PUBLIC ${LLAMA_INCLUDE_DIR} - ${CMAKE_CURRENT_BINARY_DIR}) +add_library(common OBJECT) +file(GLOB _common_files + "${_common_path}/*.h" + "${_common_path}/*.cpp" +) +target_sources(common PRIVATE ${_common_files}) # If the common project was part of "main-cmake-pkg" the transient # defines would automatically be attached. Because the common func- From b93edd22f55d3e5268263c3edcdae1818505c078 Mon Sep 17 00:00:00 2001 From: Karthik Sethuraman Date: Fri, 29 Dec 2023 06:22:10 -0800 Subject: [PATCH 73/84] server : allow to generate multimodal embeddings (#4681) --- examples/server/README.md | 4 +++- examples/server/server.cpp | 12 +++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index f1e586a1c..718a7e064 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -166,7 +166,7 @@ node index.js `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token (default: 0) - `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:` In this case, `[img-12]` will be replaced by the embeddings of the image id 12 in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. + `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be referenced in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. *Result JSON:* @@ -224,6 +224,8 @@ node index.js `content`: Set the text to process. + `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be referenced in `content`. You can determine the place of the image in the content as in the following: `Image: [img-21].\nCaption: This is a picture of a house`. In this case, `[img-21]` will be replaced by the embeddings of the image with id `21` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 21}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. + - **POST** `/infill`: For code infilling. Takes a prefix and a suffix and returns the predicted completion as stream.
*Options:* diff --git a/examples/server/server.cpp b/examples/server/server.cpp index c5035e202..31b8cf33d 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -3077,7 +3077,17 @@ int main(int argc, char **argv) { prompt = ""; } - const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1); + + json image_data; + if (body.count("image_data") != 0) { + image_data = body["image_data"]; + } + else + { + image_data = ""; + } + + const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1); task_result result = llama.next_result(task_id); return res.set_content(result.result_json.dump(), "application/json; charset=utf-8"); }); From 60f55e888c29cbd87c4238dd19e85d0eef87245d Mon Sep 17 00:00:00 2001 From: SakuraUmi Date: Fri, 29 Dec 2023 22:22:44 +0800 Subject: [PATCH 74/84] server : fix OpenAI server sampling w.r.t. penalty. (#4675) --- examples/server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 31b8cf33d..035eb24ac 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2461,7 +2461,7 @@ json oaicompat_completion_params_parse( llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", default_sparams.mirostat_eta); llama_params["penalize_nl"] = json_value(body, "penalize_nl", default_sparams.penalize_nl); llama_params["typical_p"] = json_value(body, "typical_p", default_sparams.typical_p); - llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", 0); + llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", default_sparams.penalty_last_n); llama_params["ignore_eos"] = json_value(body, "ignore_eos", false); llama_params["tfs_z"] = json_value(body, "tfs_z", default_sparams.tfs_z); From db49ff8ed7f0bb201176703441cc02911b08ef2a Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Fri, 29 Dec 2023 06:24:12 -0800 Subject: [PATCH 75/84] server : replace sleep with condition variables (#4673) The server currently schedules tasks using a sleep(5ms) busy loop. This adds unnecessary latency since most sleep implementations round up to the system scheduling quantum (usually 10ms). Other libc sleep impls spin for smaller time intervals, which results in the server's busy loop consuming all available CPU. Having the explicit notify() / wait() code also aids the readability of the server code.
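As a minimal sketch of the pattern (illustrative names only, not the server's actual task_server/task_result types), the scheduler stops polling and instead blocks until a producer signals that work has arrived:

```
#include <condition_variable>
#include <deque>
#include <mutex>

struct task_queue {
    std::mutex              mutex;
    std::condition_variable condition;
    std::deque<int>         tasks;   // stand-in for the real task type

    // producer: enqueue under the lock, then wake one sleeping worker
    void post(int task) {
        {
            std::lock_guard<std::mutex> lock(mutex);
            tasks.push_back(task);
        }
        condition.notify_one();
    }

    // consumer: block until work arrives instead of polling with sleep(5ms);
    // re-checking the predicate makes spurious wakeups harmless
    int wait_and_pop() {
        std::unique_lock<std::mutex> lock(mutex);
        condition.wait(lock, [&] { return !tasks.empty(); });
        int task = tasks.front();
        tasks.pop_front();
        return task;
    }
};
```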
See mozilla-Ocho/llamafile@711344b --- examples/server/server.cpp | 41 ++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 035eb24ac..0aada8e28 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #ifndef SERVER_VERBOSE #define SERVER_VERBOSE 1 @@ -541,7 +542,9 @@ struct llama_server_context std::vector queue_results; std::vector queue_multitasks; std::mutex mutex_tasks; // also guards id_gen, and queue_multitasks + std::condition_variable condition_tasks; std::mutex mutex_results; + std::condition_variable condition_results; ~llama_server_context() { @@ -1169,7 +1172,7 @@ struct llama_server_context void send_error(task_server& task, std::string error) { - std::lock_guard lock(mutex_results); + std::unique_lock lock(mutex_results); task_result res; res.id = task.id; res.multitask_id = task.multitask_id; @@ -1177,6 +1180,7 @@ struct llama_server_context res.error = true; res.result_json = { { "content", error } }; queue_results.push_back(res); + condition_results.notify_all(); } void add_multi_task(int id, std::vector& sub_ids) @@ -1186,6 +1190,7 @@ struct llama_server_context multi.id = id; std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end())); queue_multitasks.push_back(multi); + condition_tasks.notify_one(); } void update_multi_task(int multitask_id, int subtask_id, task_result& result) @@ -1197,6 +1202,7 @@ struct llama_server_context { multitask.subtasks_remaining.erase(subtask_id); multitask.results.push_back(result); + condition_tasks.notify_one(); } } } @@ -1244,7 +1250,7 @@ struct llama_server_context void send_partial_response(llama_client_slot &slot, completion_token_output tkn) { - std::lock_guard lock(mutex_results); + std::unique_lock lock(mutex_results); task_result res; res.id = slot.task_id; res.multitask_id = slot.multitask_id; @@ -1280,11 +1286,12 @@ struct llama_server_context } queue_results.push_back(res); + condition_results.notify_all(); } void send_final_response(llama_client_slot &slot) { - std::lock_guard lock(mutex_results); + std::unique_lock lock(mutex_results); task_result res; res.id = slot.task_id; res.multitask_id = slot.multitask_id; @@ -1340,11 +1347,12 @@ struct llama_server_context } queue_results.push_back(res); + condition_results.notify_all(); } void send_embedding(llama_client_slot &slot) { - std::lock_guard lock(mutex_results); + std::unique_lock lock(mutex_results); task_result res; res.id = slot.task_id; res.multitask_id = slot.multitask_id; @@ -1372,6 +1380,7 @@ struct llama_server_context }; } queue_results.push_back(res); + condition_results.notify_all(); } int request_completion(json data, bool infill, bool embedding, int multitask_id) @@ -1395,6 +1404,7 @@ struct llama_server_context // otherwise, it's a single-prompt task, we actually queue it queue_tasks.push_back(task); + condition_tasks.notify_one(); return task.id; } @@ -1402,13 +1412,10 @@ struct llama_server_context { while (true) { - std::this_thread::sleep_for(std::chrono::microseconds(5)); - std::lock_guard lock(mutex_results); - - if (queue_results.empty()) - { - continue; - } + std::unique_lock lock(mutex_results); + condition_results.wait(lock, [&]{ + return !queue_results.empty(); + }); for (int i = 0; i < (int) queue_results.size(); i++) { @@ -1504,12 +1511,13 @@ struct llama_server_context void request_cancel(int task_id) { - 
std::lock_guard lock(mutex_tasks); + std::unique_lock lock(mutex_tasks); task_server task; task.id = id_gen++; task.type = CANCEL_TASK; task.target_id = task_id; queue_tasks.push_back(task); + condition_tasks.notify_one(); } int split_multiprompt_task(task_server& multiprompt_task) @@ -1535,7 +1543,7 @@ struct llama_server_context void process_tasks() { - std::lock_guard lock(mutex_tasks); + std::unique_lock lock(mutex_tasks); while (!queue_tasks.empty()) { task_server task = queue_tasks.front(); @@ -1607,6 +1615,7 @@ struct llama_server_context std::lock_guard lock(mutex_results); queue_results.push_back(aggregate_result); + condition_results.notify_all(); queue_iterator = queue_multitasks.erase(queue_iterator); } @@ -1637,8 +1646,10 @@ struct llama_server_context LOG_TEE("all slots are idle and system prompt is empty, clear the KV cache\n"); kv_cache_clear(); } - // avoid 100% usage of cpu all time - std::this_thread::sleep_for(std::chrono::milliseconds(5)); + std::unique_lock lock(mutex_tasks); + condition_tasks.wait(lock, [&]{ + return !queue_tasks.empty(); + }); } for (llama_client_slot &slot : slots) From 4af4801566bc262a38fb77f51edf278ac323c2bd Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Fri, 29 Dec 2023 06:38:38 -0800 Subject: [PATCH 76/84] llava-cli : refactor to use sampling library (#4669) This change makes it possible to use flags like `--grammar` when using the `llava-cli` program. The rest is just code cleanup deleting a long standing TODO comment. This change also ensures that logging information is emitted to stderr which helps the `llava-cli` command be more friendly to shell scripts. See Mozilla-Ocho/llamafile@1cd334f --- examples/llava/llava-cli.cpp | 85 ++++++------------------------------ 1 file changed, 13 insertions(+), 72 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 31f8cd8e0..502b788b1 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -39,73 +39,11 @@ static bool eval_string(struct llama_context * ctx_llama, const char* str, int n return true; } -// TODO: use common/sampling.h -static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) { - auto & sparams = params.sparams; - - // out of user input, sample next token - const float temp = sparams.temp; - const int32_t top_k = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k; - const float top_p = sparams.top_p; - const float tfs_z = sparams.tfs_z; - const float typical_p = sparams.typical_p; - // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? 
n_ctx : sparams.repeat_last_n; - // const float repeat_penalty = sparams.repeat_penalty; - // const float alpha_presence = sparams.presence_penalty; - // const float alpha_frequency = sparams.frequency_penalty; - const int mirostat = sparams.mirostat; - const float mirostat_tau = sparams.mirostat_tau; - const float mirostat_eta = sparams.mirostat_eta; - // const bool penalize_nl = sparams.penalize_nl; - - llama_token id = 0; - { - auto logits = llama_get_logits(ctx_llama); - auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama)); - - // Apply params.logit_bias map - for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) { - logits[it->first] += it->second; - } - - std::vector candidates; - candidates.reserve(n_vocab); - for (llama_token token_id = 0; token_id < n_vocab; token_id++) { - candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); - } - - llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; - - if (temp <= 0) { - // Greedy sampling - id = llama_sample_token_greedy(ctx_llama, &candidates_p); - } else { - if (mirostat == 1) { - static float mirostat_mu = 2.0f * mirostat_tau; - const int mirostat_m = 100; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); - } else if (mirostat == 2) { - static float mirostat_mu = 2.0f * mirostat_tau; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); - } else { - // Temperature sampling - llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1); - llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1); - llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1); - llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1); - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token(ctx_llama, &candidates_p); - } - } - } - - return id; -} - -static const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) { - int id = sample_id(ctx_llama, params); +static const char * sample(struct llama_sampling_context * ctx_sampling, + struct llama_context * ctx_llama, + int * n_past) { + const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL); + llama_sampling_accept(ctx_sampling, ctx_llama, id, true); static std::string ret; if (id == llama_token_eos(llama_get_model(ctx_llama))) { ret = ""; @@ -174,8 +112,8 @@ struct llava_context { }; static void show_additional_info(int /*argc*/, char ** argv) { - printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); - printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); + fprintf(stderr, "\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + fprintf(stderr, " note: a lower temperature value like 0.1 is recommended for better quality.\n"); } static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) { @@ -185,7 +123,7 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para auto prompt = params->prompt; if (prompt_contains_image(prompt)) { if (!params->image.empty()) { - printf("using base64 encoded image instead of command line image path\n"); + fprintf(stderr, "using base64 encoded image instead of command line image path\n"); } embed = 
llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt); if (!embed) { @@ -217,16 +155,19 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_ // generate the response - printf("\n"); + fprintf(stderr, "\n"); + + struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams); for (int i = 0; i < max_tgt_len; i++) { - const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past); + const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past); if (strcmp(tmp, "") == 0) break; printf("%s", tmp); fflush(stdout); } + llama_sampling_free(ctx_sampling); printf("\n"); } From 97bbca6e8522d18041fcde6c3d0907a52ce36446 Mon Sep 17 00:00:00 2001 From: Cuong Trinh Manh Date: Fri, 29 Dec 2023 21:39:15 +0700 Subject: [PATCH 77/84] cmake : fix ld warning duplicate libraries libllama.a (#4671) * fix "ld: warning: ignoring duplicate libraries: '../libllama.a'" * fix warning in example. --- common/CMakeLists.txt | 2 +- examples/llava/CMakeLists.txt | 2 +- examples/server/CMakeLists.txt | 2 +- tests/CMakeLists.txt | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index b5d5453d2..f79acfef1 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -65,4 +65,4 @@ endif() target_include_directories(${TARGET} PUBLIC .) target_compile_features(${TARGET} PUBLIC cxx_std_11) -target_link_libraries(${TARGET} PRIVATE llama build_info) +target_link_libraries(${TARGET} PRIVATE build_info PUBLIC llama) diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 8ea3e5c83..48dae1506 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -32,5 +32,5 @@ endif() set(TARGET llava-cli) add_executable(llava-cli llava-cli.cpp) install(TARGETS llava-cli RUNTIME) -target_link_libraries(llava-cli PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(llava-cli PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(llava PRIVATE cxx_std_11) diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index 859cd12c6..81709e448 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -6,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME) target_compile_definitions(${TARGET} PRIVATE SERVER_VERBOSE=$ ) -target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) if (WIN32) TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) endif() diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9b5e69d13..7c932240d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -2,7 +2,7 @@ function(llama_build_executable source) get_filename_component(TEST_TARGET ${source} NAME_WE) add_executable(${TEST_TARGET} ${source}) install(TARGETS ${TEST_TARGET} RUNTIME) - target_link_libraries(${TEST_TARGET} PRIVATE llama common) + target_link_libraries(${TEST_TARGET} PRIVATE common) endfunction() function(llama_test_executable name source) @@ -14,7 +14,7 @@ function(llama_build_and_test_executable source) get_filename_component(TEST_TARGET ${source} NAME_WE) add_executable(${TEST_TARGET} ${source}) install(TARGETS ${TEST_TARGET} RUNTIME) - target_link_libraries(${TEST_TARGET} PRIVATE llama common) + target_link_libraries(${TEST_TARGET} PRIVATE common) add_test(NAME ${TEST_TARGET} COMMAND $ ${ARGN}) endfunction() From 
68eccbdc5b56f2a2450f9a8463f9934388cafabf Mon Sep 17 00:00:00 2001 From: Philip Taron Date: Fri, 29 Dec 2023 06:42:26 -0800 Subject: [PATCH 78/84] flake.nix : rewrite (#4605) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * flake.lock: update to hotfix CUDA::cuda_driver Required to support https://github.com/ggerganov/llama.cpp/pull/4606 * flake.nix: rewrite 1. Split into separate files per output. 2. Added overlays, so that this flake can be integrated into others. The names in the overlay are `llama-cpp`, `llama-cpp-opencl`, `llama-cpp-cuda`, and `llama-cpp-rocm` so that they fit into the broader set of Nix packages from [nixpkgs](https://github.com/nixos/nixpkgs). 3. Use [callPackage](https://summer.nixos.org/blog/callpackage-a-tool-for-the-lazy/) rather than `with pkgs;` so that there's dependency injection rather than dependency lookup. 4. Add a description and meta information for each package. The description includes a bit about what's trying to accelerate each one. 5. Use specific CUDA packages instead of cudatoolkit on the advice of SomeoneSerge. 6. Format with `serokell/nixfmt` for a consistent style. 7. Update `flake.lock` with the latest goods. * flake.nix: use finalPackage instead of passing it manually * nix: unclutter darwin support * nix: pass most darwin frameworks unconditionally ...for simplicity * *.nix: nixfmt nix shell github:piegamesde/nixfmt/rfc101-style --command \ nixfmt flake.nix .devops/nix/*.nix * flake.nix: add maintainers * nix: move meta down to follow Nixpkgs style more closely * nix: add missing meta attributes nix: clarify the interpretation of meta.maintainers nix: clarify the meaning of "broken" and "badPlatforms" nix: passthru: expose the use* flags for inspection E.g.: ``` ❯ nix eval .#cuda.useCuda true ``` * flake.nix: avoid re-evaluating nixpkgs too many times * flake.nix: use flake-parts * nix: migrate to pname+version * flake.nix: overlay: expose both the namespace and the default attribute * ci: add the (Nix) flakestry workflow * nix: cmakeFlags: explicit OFF bools * nix: cuda: reduce runtime closure * nix: fewer rebuilds * nix: respect config.cudaCapabilities * nix: add the impure driver's location to the DT_RUNPATHs * nix: clean sources more thoroughly ...this way outPaths change less frequently, and so there are fewer rebuilds * nix: explicit mpi support * nix: explicit jetson support * flake.nix: darwin: only expose the default --------- Co-authored-by: Someone Serge --- .devops/nix/apps.nix | 22 +++ .devops/nix/devshells.nix | 13 ++ .devops/nix/jetson-support.nix | 32 ++++ .devops/nix/nixpkgs-instances.nix | 35 ++++ .devops/nix/package.nix | 265 ++++++++++++++++++++++++++++ .devops/nix/scope.nix | 12 ++ .github/workflows/nix-flakestry.yml | 23 +++ flake.lock | 55 +++--- flake.nix | 226 ++++++++++-------------- 9 files changed, 524 insertions(+), 159 deletions(-) create mode 100644 .devops/nix/apps.nix create mode 100644 .devops/nix/devshells.nix create mode 100644 .devops/nix/jetson-support.nix create mode 100644 .devops/nix/nixpkgs-instances.nix create mode 100644 .devops/nix/package.nix create mode 100644 .devops/nix/scope.nix create mode 100644 .github/workflows/nix-flakestry.yml diff --git a/.devops/nix/apps.nix b/.devops/nix/apps.nix new file mode 100644 index 000000000..b8a12cc0a --- /dev/null +++ b/.devops/nix/apps.nix @@ -0,0 +1,22 @@ +{ + perSystem = + { config, lib, ... 
}: + { + apps = + let + inherit (config.packages) default; + binaries = [ + "llama" + "llama-embedding" + "llama-server" + "quantize" + "train-text-from-scratch" + ]; + mkApp = name: { + type = "app"; + program = "${default}/bin/${name}"; + }; + in + lib.genAttrs binaries mkApp; + }; +} diff --git a/.devops/nix/devshells.nix b/.devops/nix/devshells.nix new file mode 100644 index 000000000..1862f0f08 --- /dev/null +++ b/.devops/nix/devshells.nix @@ -0,0 +1,13 @@ +{ + perSystem = + { config, lib, ... }: + { + devShells = + lib.concatMapAttrs + (name: package: { + ${name} = package.passthru.shell; + ${name + "-extra"} = package.passthru.shell-extra; + }) + config.packages; + }; +} diff --git a/.devops/nix/jetson-support.nix b/.devops/nix/jetson-support.nix new file mode 100644 index 000000000..08426d2ab --- /dev/null +++ b/.devops/nix/jetson-support.nix @@ -0,0 +1,32 @@ +{ inputs, ... }: +{ + perSystem = + { + config, + system, + lib, + pkgsCuda, + ... + }: + lib.optionalAttrs (system == "aarch64-linux") { + packages = + let + caps.jetson-xavier = "7.2"; + caps.jetson-orin = "8.7"; + caps.jetson-nano = "5.3"; + + pkgsFor = + cap: + import inputs.nixpkgs { + inherit system; + config = { + cudaSupport = true; + cudaCapabilities = [ cap ]; + cudaEnableForwardCompat = false; + inherit (pkgsCuda.config) allowUnfreePredicate; + }; + }; + in + builtins.mapAttrs (name: cap: ((pkgsFor cap).callPackage ./scope.nix { }).llama-cpp) caps; + }; +} diff --git a/.devops/nix/nixpkgs-instances.nix b/.devops/nix/nixpkgs-instances.nix new file mode 100644 index 000000000..6e9872b28 --- /dev/null +++ b/.devops/nix/nixpkgs-instances.nix @@ -0,0 +1,35 @@ +{ inputs, ... }: +{ + # The _module.args definitions are passed on to modules as arguments. E.g. + # the module `{ pkgs ... }: { /* config */ }` implicitly uses + # `_module.args.pkgs` (defined in this case by flake-parts). + perSystem = + { system, ... }: + { + _module.args = { + pkgsCuda = import inputs.nixpkgs { + inherit system; + # Ensure dependencies use CUDA consistently (e.g. that openmpi, ucc, + # and ucx are built with CUDA support) + config.cudaSupport = true; + config.allowUnfreePredicate = + p: + builtins.all + ( + license: + license.free + || builtins.elem license.shortName [ + "CUDA EULA" + "cuDNN EULA" + ] + ) + (p.meta.licenses or [ p.meta.license ]); + }; + # Ensure dependencies use ROCm consistently + pkgsRocm = import inputs.nixpkgs { + inherit system; + config.rocmSupport = true; + }; + }; + }; +} diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix new file mode 100644 index 000000000..5f2a7c9f4 --- /dev/null +++ b/.devops/nix/package.nix @@ -0,0 +1,265 @@ +{ + lib, + config, + stdenv, + mkShell, + cmake, + ninja, + pkg-config, + git, + python3, + mpi, + openblas, # TODO: Use the generic `blas` so users could switch betwen alternative implementations + cudaPackages, + darwin, + rocmPackages, + clblast, + useBlas ? builtins.all (x: !x) [ + useCuda + useMetalKit + useOpenCL + useRocm + ], + useCuda ? config.cudaSupport, + useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin && !useOpenCL, + useMpi ? false, # Increases the runtime closure size by ~700M + useOpenCL ? false, + useRocm ? config.rocmSupport, + llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake +}@inputs: + +let + inherit (lib) + cmakeBool + cmakeFeature + optionals + strings + versionOlder + ; + + # It's necessary to consistently use backendStdenv when building with CUDA support, + # otherwise we get libstdc++ errors downstream. 
+ stdenv = throw "Use effectiveStdenv instead"; + effectiveStdenv = if useCuda then cudaPackages.backendStdenv else inputs.stdenv; + + suffices = + lib.optionals useBlas [ "BLAS" ] + ++ lib.optionals useCuda [ "CUDA" ] + ++ lib.optionals useMetalKit [ "MetalKit" ] + ++ lib.optionals useMpi [ "MPI" ] + ++ lib.optionals useOpenCL [ "OpenCL" ] + ++ lib.optionals useRocm [ "ROCm" ]; + + pnameSuffix = + strings.optionalString (suffices != [ ]) + "-${strings.concatMapStringsSep "-" strings.toLower suffices}"; + descriptionSuffix = + strings.optionalString (suffices != [ ]) + ", accelerated with ${strings.concatStringsSep ", " suffices}"; + + # TODO: package the Python in this repository in a Nix-like way. + # It'd be nice to migrate to buildPythonPackage, as well as ensure this repo + # is PEP 517-compatible, and ensure the correct .dist-info is generated. + # https://peps.python.org/pep-0517/ + llama-python = python3.withPackages ( + ps: [ + ps.numpy + ps.sentencepiece + ] + ); + + # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime + llama-python-extra = python3.withPackages ( + ps: [ + ps.numpy + ps.sentencepiece + ps.torchWithoutCuda + ps.transformers + ] + ); + + # apple_sdk is supposed to choose sane defaults, no need to handle isAarch64 + # separately + darwinBuildInputs = + with darwin.apple_sdk.frameworks; + [ + Accelerate + CoreVideo + CoreGraphics + ] + ++ optionals useMetalKit [ MetalKit ]; + + cudaBuildInputs = with cudaPackages; [ + cuda_cccl.dev # + + # A temporary hack for reducing the closure size, remove once cudaPackages + # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792 + cuda_cudart.dev + cuda_cudart.lib + cuda_cudart.static + libcublas.dev + libcublas.lib + libcublas.static + ]; + + rocmBuildInputs = with rocmPackages; [ + clr + hipblas + rocblas + ]; +in + +effectiveStdenv.mkDerivation ( + finalAttrs: { + pname = "llama-cpp${pnameSuffix}"; + version = llamaVersion; + + src = lib.cleanSourceWith { + filter = + name: type: + !(builtins.any (_: _) [ + (lib.hasSuffix ".nix" name) # Ignore *.nix files when computing outPaths + (name == "README.md") # Ignore *.md changes whe computing outPaths + (lib.hasPrefix "." name) # Skip hidden files and directories + ]); + src = lib.cleanSource ../../.; + }; + + postPatch = '' + substituteInPlace ./ggml-metal.m \ + --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";" + + # TODO: Package up each Python script or service appropriately. 
+ # If we were to migrate to buildPythonPackage and prepare the `pyproject.toml`, + # we could make those *.py into setuptools' entrypoints + substituteInPlace ./*.py --replace "/usr/bin/env python" "${llama-python}/bin/python" + ''; + + nativeBuildInputs = + [ + cmake + ninja + pkg-config + git + ] + ++ optionals useCuda [ + cudaPackages.cuda_nvcc + + # TODO: Replace with autoAddDriverRunpath + # once https://github.com/NixOS/nixpkgs/pull/275241 has been merged + cudaPackages.autoAddOpenGLRunpathHook + ]; + + buildInputs = + optionals effectiveStdenv.isDarwin darwinBuildInputs + ++ optionals useCuda cudaBuildInputs + ++ optionals useMpi [ mpi ] + ++ optionals useOpenCL [ clblast ] + ++ optionals useRocm rocmBuildInputs; + + cmakeFlags = + [ + (cmakeBool "LLAMA_NATIVE" true) + (cmakeBool "LLAMA_BUILD_SERVER" true) + (cmakeBool "BUILD_SHARED_LIBS" true) + (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true) + (cmakeBool "LLAMA_BLAS" useBlas) + (cmakeBool "LLAMA_CLBLAST" useOpenCL) + (cmakeBool "LLAMA_CUBLAS" useCuda) + (cmakeBool "LLAMA_HIPBLAS" useRocm) + (cmakeBool "LLAMA_METAL" useMetalKit) + (cmakeBool "LLAMA_MPI" useMpi) + ] + ++ optionals useCuda [ + ( + with cudaPackages.flags; + cmakeFeature "CMAKE_CUDA_ARCHITECTURES" ( + builtins.concatStringsSep ";" (map dropDot cudaCapabilities) + ) + ) + ] + ++ optionals useRocm [ + (cmakeFeature "CMAKE_C_COMPILER" "hipcc") + (cmakeFeature "CMAKE_CXX_COMPILER" "hipcc") + + # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM + # in https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt + # and select the line that matches the current nixpkgs version of rocBLAS. + # Should likely use `rocmPackages.clr.gpuTargets`. + "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102" + ] + ++ optionals useMetalKit [ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") ] + ++ optionals useBlas [ (lib.cmakeFeature "LLAMA_BLAS_VENDOR" "OpenBLAS") ]; + + # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level, + # if they haven't been added yet. + postInstall = '' + mv $out/bin/main $out/bin/llama + mv $out/bin/server $out/bin/llama-server + mkdir -p $out/include + cp $src/llama.h $out/include/ + ''; + + # Define the shells here, but don't add in the inputsFrom to avoid recursion. + passthru = { + inherit + useBlas + useCuda + useMetalKit + useMpi + useOpenCL + useRocm + ; + + shell = mkShell { + name = "shell-${finalAttrs.finalPackage.name}"; + description = "contains numpy and sentencepiece"; + buildInputs = [ llama-python ]; + inputsFrom = [ finalAttrs.finalPackage ]; + }; + + shell-extra = mkShell { + name = "shell-extra-${finalAttrs.finalPackage.name}"; + description = "contains numpy, sentencepiece, torchWithoutCuda, and transformers"; + buildInputs = [ llama-python-extra ]; + inputsFrom = [ finalAttrs.finalPackage ]; + }; + }; + + meta = { + # Configurations we don't want even the CI to evaluate. Results in the + # "unsupported platform" messages. This is mostly a no-op, because + # cudaPackages would've refused to evaluate anyway. + badPlatforms = optionals (useCuda || useOpenCL) lib.platforms.darwin; + + # Configurations that are known to result in build failures. Can be + # overridden by importing Nixpkgs with `allowBroken = true`. 
+ broken = (useMetalKit && !effectiveStdenv.isDarwin); + + description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}"; + homepage = "https://github.com/ggerganov/llama.cpp/"; + license = lib.licenses.mit; + + # Accommodates `nix run` and `lib.getExe` + mainProgram = "llama"; + + # These people might respond, on the best effort basis, if you ping them + # in case of Nix-specific regressions or for reviewing Nix-specific PRs. + # Consider adding yourself to this list if you want to ensure this flake + # stays maintained and you're willing to invest your time. Do not add + # other people without their consent. Consider removing people after + # they've been unreachable for long periods of time. + + # Note that lib.maintainers is defined in Nixpkgs, but you may just add + # an attrset following the same format as in + # https://github.com/NixOS/nixpkgs/blob/f36a80e54da29775c78d7eff0e628c2b4e34d1d7/maintainers/maintainer-list.nix + maintainers = with lib.maintainers; [ + philiptaron + SomeoneSerge + ]; + + # Extend `badPlatforms` instead + platforms = lib.platforms.all; + }; + } +) diff --git a/.devops/nix/scope.nix b/.devops/nix/scope.nix new file mode 100644 index 000000000..7932ac1e8 --- /dev/null +++ b/.devops/nix/scope.nix @@ -0,0 +1,12 @@ +{ + lib, + newScope, + llamaVersion ? "0.0.0", +}: + +lib.makeScope newScope ( + self: { + inherit llamaVersion; + llama-cpp = self.callPackage ./package.nix { }; + } +) diff --git a/.github/workflows/nix-flakestry.yml b/.github/workflows/nix-flakestry.yml new file mode 100644 index 000000000..3abfb3509 --- /dev/null +++ b/.github/workflows/nix-flakestry.yml @@ -0,0 +1,23 @@ +# Make the flake discoverable on https://flakestry.dev +name: "Publish a flake to flakestry" +on: + push: + tags: + - "v?[0-9]+.[0-9]+.[0-9]+" + - "v?[0-9]+.[0-9]+" + workflow_dispatch: + inputs: + tag: + description: "The existing tag to publish" + type: "string" + required: true +jobs: + publish-flake: + runs-on: ubuntu-latest + permissions: + id-token: "write" + contents: "read" + steps: + - uses: flakestry/flakestry-publish@main + with: + version: "${{ inputs.tag || github.ref_name }}" diff --git a/flake.lock b/flake.lock index 0455f6561..3fcd1f45d 100644 --- a/flake.lock +++ b/flake.lock @@ -1,30 +1,30 @@ { "nodes": { - "flake-utils": { + "flake-parts": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1694529238, - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "lastModified": 1701473968, + "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1698318101, - "narHash": "sha256-gUihHt3yPD7bVqg+k/UVHgngyaJ3DMEBchbymBMvK1E=", + "lastModified": 1703559957, + "narHash": "sha256-x9PUuMEPGUOMB51zNxrDr2QoHbYWlCS2xhFedm9MC5Q=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "63678e9f3d3afecfeafa0acead6239cdb447574c", + "rev": "75dd68c36f458c6593c5bbb48abfd3e59bfed380", "type": "github" }, "original": { @@ -34,26 +34,29 @@ "type": "github" } }, - "root": { - "inputs": { - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" - } - }, - "systems": { + "nixpkgs-lib": { "locked": { - "lastModified": 
1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "dir": "lib", + "lastModified": 1701253981, + "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58", "type": "github" }, "original": { - "owner": "nix-systems", - "repo": "default", + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", "type": "github" } + }, + "root": { + "inputs": { + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 4cf28d5c1..2209070aa 100644 --- a/flake.nix +++ b/flake.nix @@ -1,139 +1,99 @@ { + description = "Port of Facebook's LLaMA model in C/C++"; + inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; - flake-utils.url = "github:numtide/flake-utils"; + flake-parts.url = "github:hercules-ci/flake-parts"; }; - outputs = { self, nixpkgs, flake-utils }: - flake-utils.lib.eachDefaultSystem (system: - let - name = "llama.cpp"; - src = ./.; - meta.mainProgram = "llama"; - inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin; - buildInputs = with pkgs; [ openmpi ]; - osSpecific = with pkgs; buildInputs ++ ( - if isAarch64 && isDarwin then - with pkgs.darwin.apple_sdk_11_0.frameworks; [ - Accelerate - MetalKit - ] - else if isAarch32 && isDarwin then - with pkgs.darwin.apple_sdk.frameworks; [ - Accelerate - CoreGraphics - CoreVideo - ] - else if isDarwin then - with pkgs.darwin.apple_sdk.frameworks; [ - Accelerate - CoreGraphics - CoreVideo - ] - else - with pkgs; [ openblas ] - ); - pkgs = import nixpkgs { inherit system; }; - nativeBuildInputs = with pkgs; [ cmake ninja pkg-config ]; - cudatoolkit_joined = with pkgs; symlinkJoin { - # HACK(Green-Sky): nix currently has issues with cmake findcudatoolkit - # see https://github.com/NixOS/nixpkgs/issues/224291 - # copied from jaxlib - name = "${cudaPackages.cudatoolkit.name}-merged"; - paths = [ - cudaPackages.cudatoolkit.lib - cudaPackages.cudatoolkit.out - ] ++ lib.optionals (lib.versionOlder cudaPackages.cudatoolkit.version "11") [ - # for some reason some of the required libs are in the targets/x86_64-linux - # directory; not sure why but this works around it - "${cudaPackages.cudatoolkit}/targets/${system}" - ]; - }; - llama-python = - pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]); - # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime - llama-python-extra = - pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]); - postPatch = '' - substituteInPlace ./ggml-metal.m \ - --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";" - substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python' - ''; - postInstall = '' - mv $out/bin/main $out/bin/llama - mv $out/bin/server $out/bin/llama-server - mkdir -p $out/include - cp ${src}/llama.h $out/include/ - ''; - cmakeFlags = [ "-DLLAMA_NATIVE=OFF" "-DLLAMA_BUILD_SERVER=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]; - in + + # For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl: + # + # ```bash + # ❯ nix repl + # nix-repl> :lf github:ggerganov/llama.cpp + # Added 13 variables. 
+ # nix-repl> outputs.apps.x86_64-linux.quantize + # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/quantize"; type = "app"; } + # ``` + outputs = + { self, flake-parts, ... }@inputs: + let + # We could include the git revisions in the package names but those would + # needlessly trigger rebuilds: + # llamaVersion = self.dirtyShortRev or self.shortRev; + + # Nix already uses cryptographic hashes for versioning, so we'll just fix + # the fake semver for now: + llamaVersion = "0.0.0"; + in + flake-parts.lib.mkFlake { inherit inputs; } + { - packages.default = pkgs.stdenv.mkDerivation { - inherit name src meta postPatch nativeBuildInputs postInstall; - buildInputs = osSpecific; - cmakeFlags = cmakeFlags - ++ (if isAarch64 && isDarwin then [ - "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1" - "-DLLAMA_METAL=ON" - ] else [ - "-DLLAMA_BLAS=ON" - "-DLLAMA_BLAS_VENDOR=OpenBLAS" - ]); - }; - packages.opencl = pkgs.stdenv.mkDerivation { - inherit name src meta postPatch nativeBuildInputs postInstall; - buildInputs = with pkgs; buildInputs ++ [ clblast ]; - cmakeFlags = cmakeFlags ++ [ - "-DLLAMA_CLBLAST=ON" - ]; - }; - packages.cuda = pkgs.stdenv.mkDerivation { - inherit name src meta postPatch nativeBuildInputs postInstall; - buildInputs = with pkgs; buildInputs ++ [ cudatoolkit_joined ]; - cmakeFlags = cmakeFlags ++ [ - "-DLLAMA_CUBLAS=ON" - ]; - }; - packages.rocm = pkgs.stdenv.mkDerivation { - inherit name src meta postPatch nativeBuildInputs postInstall; - buildInputs = with pkgs.rocmPackages; buildInputs ++ [ clr hipblas rocblas ]; - cmakeFlags = cmakeFlags ++ [ - "-DLLAMA_HIPBLAS=1" - "-DCMAKE_C_COMPILER=hipcc" - "-DCMAKE_CXX_COMPILER=hipcc" - # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM - # in github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt - # and select the line that matches the current nixpkgs version of rocBLAS. - "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102" - ]; - }; - apps.llama-server = { - type = "app"; - program = "${self.packages.${system}.default}/bin/llama-server"; - }; - apps.llama-embedding = { - type = "app"; - program = "${self.packages.${system}.default}/bin/embedding"; - }; - apps.llama = { - type = "app"; - program = "${self.packages.${system}.default}/bin/llama"; - }; - apps.quantize = { - type = "app"; - program = "${self.packages.${system}.default}/bin/quantize"; - }; - apps.train-text-from-scratch = { - type = "app"; - program = "${self.packages.${system}.default}/bin/train-text-from-scratch"; - }; - apps.default = self.apps.${system}.llama; - devShells.default = pkgs.mkShell { - buildInputs = [ llama-python ]; - packages = nativeBuildInputs ++ osSpecific; - }; - devShells.extra = pkgs.mkShell { - buildInputs = [ llama-python-extra ]; - packages = nativeBuildInputs ++ osSpecific; - }; - }); + + imports = [ + .devops/nix/nixpkgs-instances.nix + .devops/nix/apps.nix + .devops/nix/devshells.nix + .devops/nix/jetson-support.nix + ]; + + # An overlay can be used to have a more granular control over llama-cpp's + # dependencies and configuration, than that offered by the `.override` + # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays. + # + # E.g. in a flake: + # ``` + # { nixpkgs, llama-cpp, ... 
}: + # let pkgs = import nixpkgs { + # overlays = [ (llama-cpp.overlays.default) ]; + # system = "aarch64-linux"; + # config.allowUnfree = true; + # config.cudaSupport = true; + # config.cudaCapabilities = [ "7.2" ]; + # config.cudaEnableForwardCompat = false; + # }; in { + # packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp; + # } + # ``` + # + # Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format + flake.overlays.default = + (final: prev: { + llamaPackages = final.callPackage .devops/nix/scope.nix { inherit llamaVersion; }; + inherit (final.llamaPackages) llama-cpp; + }); + + systems = [ + "aarch64-darwin" + "aarch64-linux" + "x86_64-darwin" # x86_64-darwin isn't tested (and likely isn't relevant) + "x86_64-linux" + ]; + + perSystem = + { + config, + lib, + pkgs, + pkgsCuda, + pkgsRocm, + ... + }: + { + # We don't use the overlay here so as to avoid making too many instances of nixpkgs, + # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs + packages = + { + default = (pkgs.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp; + } + // lib.optionalAttrs pkgs.stdenv.isLinux { + opencl = config.packages.default.override { useOpenCL = true; }; + cuda = (pkgsCuda.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp; + rocm = (pkgsRocm.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp; + + mpi-cpu = config.packages.default.override { useMpi = true; }; + mpi-cuda = config.packages.default.override { useMpi = true; }; + }; + }; + }; } From 04ac0607e913ab91234dfb240e12a76509e30982 Mon Sep 17 00:00:00 2001 From: crasm Date: Fri, 29 Dec 2023 09:50:29 -0500 Subject: [PATCH 79/84] python : add check-requirements.sh and GitHub workflow (#4585) * python: add check-requirements.sh and GitHub workflow This script and workflow force package versions to remain compatible across all convert*.py scripts, while allowing secondary convert scripts to import dependencies not wanted in convert.py.
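For reference, the "compatible release" syntax enforced below is PEP 440's `~=` operator. A rough sketch of what the checked requirements files accept and reject (package names and versions here are illustrative, not the repository's actual pins):

```
# requirements/requirements-convert.txt (illustrative entries)
numpy~=1.24.4          # compatible release: >=1.24.4 and <1.25
sentencepiece~=0.1.98  # patch-level updates stay in range
# numpy==1.24.4        # an exact "==" pin would fail the check unless suppressed
```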
* Move requirements into ./requirements * Fail on "==" being used for package requirements (but can be suppressed) * Enforce "compatible release" syntax instead of == * Update workflow * Add upper version bound for transformers and protobuf * improve check-requirements.sh * small syntax change * don't remove venvs if nocleanup is passed * See if this fixes docker workflow * Move check-requirements.sh into ./scripts/ --------- Co-authored-by: Jared Van Bortel --- .devops/full-cuda.Dockerfile | 3 +- .devops/full-rocm.Dockerfile | 3 +- .devops/full.Dockerfile | 3 +- .devops/main-rocm.Dockerfile | 3 +- .../workflows/python-check-requirements.yml | 29 +++ convert-hf-to-gguf.py | 95 +++++----- convert-lora-to-ggml.py | 147 +++++++-------- convert-persimmon-to-gguf.py | 1 + requirements-hf-to-gguf.txt | 3 - requirements.txt | 17 +- .../requirements-convert-hf-to-gguf.txt | 2 + ...equirements-convert-llama-ggml-to-gguf.txt | 1 + .../requirements-convert-lora-to-ggml.txt | 2 + ...requirements-convert-persimmon-to-gguf.txt | 2 + requirements/requirements-convert.txt | 5 + scripts/check-requirements.sh | 174 ++++++++++++++++++ 16 files changed, 360 insertions(+), 130 deletions(-) create mode 100644 .github/workflows/python-check-requirements.yml mode change 100644 => 100755 convert-persimmon-to-gguf.py delete mode 100644 requirements-hf-to-gguf.txt create mode 100644 requirements/requirements-convert-hf-to-gguf.txt create mode 100644 requirements/requirements-convert-llama-ggml-to-gguf.txt create mode 100644 requirements/requirements-convert-lora-to-ggml.txt create mode 100644 requirements/requirements-convert-persimmon-to-gguf.txt create mode 100644 requirements/requirements-convert.txt create mode 100755 scripts/check-requirements.sh diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile index 360602d65..77a9ddc14 100644 --- a/.devops/full-cuda.Dockerfile +++ b/.devops/full-cuda.Dockerfile @@ -14,7 +14,8 @@ ARG CUDA_DOCKER_ARCH=all RUN apt-get update && \ apt-get install -y build-essential python3 python3-pip git -COPY requirements.txt requirements.txt +COPY requirements.txt requirements.txt +COPY requirements requirements RUN pip install --upgrade pip setuptools wheel \ && pip install -r requirements.txt diff --git a/.devops/full-rocm.Dockerfile b/.devops/full-rocm.Dockerfile index 6c521e9b4..8b9633dc4 100644 --- a/.devops/full-rocm.Dockerfile +++ b/.devops/full-rocm.Dockerfile @@ -23,7 +23,8 @@ ARG ROCM_DOCKER_ARCH=\ gfx1101 \ gfx1102 -COPY requirements.txt requirements.txt +COPY requirements.txt requirements.txt +COPY requirements requirements RUN pip install --upgrade pip setuptools wheel \ && pip install -r requirements.txt diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile index 687628b35..cef1297d3 100644 --- a/.devops/full.Dockerfile +++ b/.devops/full.Dockerfile @@ -5,7 +5,8 @@ FROM ubuntu:$UBUNTU_VERSION as build RUN apt-get update && \ apt-get install -y build-essential python3 python3-pip git -COPY requirements.txt requirements.txt +COPY requirements.txt requirements.txt +COPY requirements requirements RUN pip install --upgrade pip setuptools wheel \ && pip install -r requirements.txt diff --git a/.devops/main-rocm.Dockerfile b/.devops/main-rocm.Dockerfile index 789deff6d..0a706dc73 100644 --- a/.devops/main-rocm.Dockerfile +++ b/.devops/main-rocm.Dockerfile @@ -23,7 +23,8 @@ ARG ROCM_DOCKER_ARCH=\ gfx1101 \ gfx1102 -COPY requirements.txt requirements.txt +COPY requirements.txt requirements.txt +COPY requirements requirements RUN pip install --upgrade pip 
setuptools wheel \ && pip install -r requirements.txt diff --git a/.github/workflows/python-check-requirements.yml b/.github/workflows/python-check-requirements.yml new file mode 100644 index 000000000..92e1108b3 --- /dev/null +++ b/.github/workflows/python-check-requirements.yml @@ -0,0 +1,29 @@ +name: Python check requirements.txt + +on: + push: + paths: + - 'scripts/check-requirements.sh' + - 'convert*.py' + - 'requirements.txt' + - 'requirements/*.txt' + pull_request: + paths: + - 'scripts/check-requirements.sh' + - 'convert*.py' + - 'requirements.txt' + - 'requirements/*.txt' + +jobs: + python-check-requirements: + runs-on: ubuntu-latest + name: check-requirements + steps: + - name: Check out source repository + uses: actions/checkout@v3 + - name: Set up Python environment + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - name: Run check-requirements.sh script + run: bash scripts/check-requirements.sh nocleanup diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 3557a825e..51724c0df 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -242,7 +242,7 @@ class Model: tokens: list[bytearray] = [] toktypes: list[int] = [] - from transformers import AutoTokenizer # type: ignore[attr-defined] + from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(dir_model) vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) assert max(tokenizer.vocab.values()) < vocab_size @@ -856,7 +856,7 @@ class StableLMModel(Model): hparams = self.hparams block_count = hparams["num_hidden_layers"] - self.gguf_writer.add_name(dir_model.name) + self.gguf_writer.add_name(self.dir_model.name) self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) self.gguf_writer.add_embedding_length(hparams["hidden_size"]) self.gguf_writer.add_block_count(block_count) @@ -902,7 +902,7 @@ class QwenModel(Model): tokens: list[bytearray] = [] toktypes: list[int] = [] - from transformers import AutoTokenizer # type: ignore[attr-defined] + from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) vocab_size = hparams["vocab_size"] assert max(tokenizer.get_vocab().values()) < vocab_size @@ -1185,57 +1185,62 @@ def parse_args() -> argparse.Namespace: return parser.parse_args() -args = parse_args() +def main() -> None: + args = parse_args() -dir_model = args.model + dir_model = args.model -if args.awq_path: - sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) - from awq.apply_awq import add_scale_weights - tmp_model_path = args.model / "weighted_model" - dir_model = tmp_model_path - if tmp_model_path.is_dir(): - print(f"{tmp_model_path} exists as a weighted model.") + if args.awq_path: + sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) + from awq.apply_awq import add_scale_weights + tmp_model_path = args.model / "weighted_model" + dir_model = tmp_model_path + if tmp_model_path.is_dir(): + print(f"{tmp_model_path} exists as a weighted model.") + else: + tmp_model_path.mkdir(parents=True, exist_ok=True) + print("Saving new weighted model ...") + add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path)) + print(f"Saved weighted model at {tmp_model_path}.") + + if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file=sys.stderr) + sys.exit(1) + + ftype_map = { + "f32": gguf.GGMLQuantizationType.F32, + "f16": gguf.GGMLQuantizationType.F16, + } + + if args.outfile is not None: + fname_out = args.outfile else: - 
tmp_model_path.mkdir(parents=True, exist_ok=True)
-        print("Saving new weighted model ...")
-        add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
-        print(f"Saved weighted model at {tmp_model_path}.")
+            # output in the same directory as the model by default
+            fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
 
-if not dir_model.is_dir():
-    print(f'Error: {args.model} is not a directory', file=sys.stderr)
-    sys.exit(1)
+    print(f"Loading model: {dir_model.name}")
 
-ftype_map = {
-    "f32": gguf.GGMLQuantizationType.F32,
-    "f16": gguf.GGMLQuantizationType.F16,
-}
+    hparams = Model.load_hparams(dir_model)
 
-if args.outfile is not None:
-    fname_out = args.outfile
-else:
-    # output in the same directory as the model by default
-    fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
+    with torch.inference_mode():
+        model_class = Model.from_model_architecture(hparams["architectures"][0])
+        model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian)
 
-print(f"Loading model: {dir_model.name}")
+        print("Set model parameters")
+        model_instance.set_gguf_parameters()
 
-hparams = Model.load_hparams(dir_model)
+        print("Set model tokenizer")
+        model_instance.set_vocab()
 
-with torch.inference_mode():
-    model_class = Model.from_model_architecture(hparams["architectures"][0])
-    model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian)
+        if args.vocab_only:
+            print(f"Exporting model vocab to '{fname_out}'")
+            model_instance.write_vocab()
+        else:
+            print(f"Exporting model to '{fname_out}'")
+            model_instance.write()
 
-    print("Set model parameters")
-    model_instance.set_gguf_parameters()
+        print(f"Model successfully exported to '{fname_out}'")
 
-    print("Set model tokenizer")
-    model_instance.set_vocab()
 
-    if args.vocab_only:
-        print(f"Exporting model vocab to '{fname_out}'")
-        model_instance.write_vocab()
-    else:
-        print(f"Exporting model to '{fname_out}'")
-        model_instance.write()
-
-    print(f"Model successfully exported to '{fname_out}'")
+if __name__ == '__main__':
+    main()
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index 53bb8a3d9..35ce152f4 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -47,95 +47,96 @@ def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_ty
     fout.seek((fout.tell() + 31) & -32)
 
 
-if len(sys.argv) < 2:
-    print(f"Usage: python {sys.argv[0]} <path> [arch]")
-    print(
-        "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
-    )
-    print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
-    sys.exit(1)
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print(f"Usage: python {sys.argv[0]} <path> [arch]")
+        print(
+            "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
+        )
+        print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
+        sys.exit(1)
 
-input_json = os.path.join(sys.argv[1], "adapter_config.json")
-input_model = os.path.join(sys.argv[1], "adapter_model.bin")
-output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
+    input_json = os.path.join(sys.argv[1], "adapter_config.json")
+    input_model = os.path.join(sys.argv[1], "adapter_model.bin")
+    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
 
-model = torch.load(input_model, map_location="cpu")
-arch_name = sys.argv[2] if 
len(sys.argv) == 3 else "llama" -if arch_name not in gguf.MODEL_ARCH_NAMES.values(): - print(f"Error: unsupported architecture {arch_name}") - sys.exit(1) + if arch_name not in gguf.MODEL_ARCH_NAMES.values(): + print(f"Error: unsupported architecture {arch_name}") + sys.exit(1) -arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)] -name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone + arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)] + name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone -with open(input_json, "r") as f: - params = json.load(f) + with open(input_json, "r") as f: + params = json.load(f) -if params["peft_type"] != "LORA": - print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA") - sys.exit(1) + if params["peft_type"] != "LORA": + print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA") + sys.exit(1) -if params["fan_in_fan_out"] is True: - print("Error: param fan_in_fan_out is not supported") - sys.exit(1) + if params["fan_in_fan_out"] is True: + print("Error: param fan_in_fan_out is not supported") + sys.exit(1) -if params["bias"] is not None and params["bias"] != "none": - print("Error: param bias is not supported") - sys.exit(1) + if params["bias"] is not None and params["bias"] != "none": + print("Error: param bias is not supported") + sys.exit(1) -# TODO: these seem to be layers that have been trained but without lora. -# doesn't seem widely used but eventually should be supported -if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0: - print("Error: param modules_to_save is not supported") - sys.exit(1) + # TODO: these seem to be layers that have been trained but without lora. + # doesn't seem widely used but eventually should be supported + if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0: + print("Error: param modules_to_save is not supported") + sys.exit(1) -with open(output_path, "wb") as fout: - fout.truncate() + with open(output_path, "wb") as fout: + fout.truncate() - write_file_header(fout, params) - for k, v in model.items(): - orig_k = k - if k.endswith(".default.weight"): - k = k.replace(".default.weight", ".weight") - if k in ["llama_proj.weight", "llama_proj.bias"]: - continue - if k.endswith("lora_A.weight"): - if v.dtype != torch.float16 and v.dtype != torch.float32: + write_file_header(fout, params) + for k, v in model.items(): + orig_k = k + if k.endswith(".default.weight"): + k = k.replace(".default.weight", ".weight") + if k in ["llama_proj.weight", "llama_proj.bias"]: + continue + if k.endswith("lora_A.weight"): + if v.dtype != torch.float16 and v.dtype != torch.float32: + v = v.float() + v = v.T + else: v = v.float() - v = v.T - else: - v = v.float() - t = v.detach().numpy() + t = v.detach().numpy() - prefix = "base_model.model." - if k.startswith(prefix): - k = k[len(prefix) :] + prefix = "base_model.model." 
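To keep the renaming below easy to follow, here is the same transformation traced on one hypothetical PEFT tensor name as standalone Python (the final GGUF name in the comments is illustrative only; the real mapping is produced by gguf.TensorNameMap):

    k = "base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight"  # hypothetical input
    k = k[len("base_model.model."):]    # "model.layers.0.self_attn.q_proj.lora_A.weight"
    base = k[: -len(".lora_A.weight")]  # "model.layers.0.self_attn.q_proj"
    # name_map.get_name(base) maps this to the GGUF base name, e.g. "blk.0.attn_q",
    # and the script then appends ".weight.loraA" to give "blk.0.attn_q.weight.loraA"
    print(base)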
+ if k.startswith(prefix): + k = k[len(prefix) :] - lora_suffixes = (".lora_A.weight", ".lora_B.weight") - if k.endswith(lora_suffixes): - suffix = k[-len(lora_suffixes[0]):] - k = k[: -len(lora_suffixes[0])] - else: - print(f"Error: unrecognized tensor name {orig_k}") - sys.exit(1) + lora_suffixes = (".lora_A.weight", ".lora_B.weight") + if k.endswith(lora_suffixes): + suffix = k[-len(lora_suffixes[0]):] + k = k[: -len(lora_suffixes[0])] + else: + print(f"Error: unrecognized tensor name {orig_k}") + sys.exit(1) - tname = name_map.get_name(k) - if tname is None: - print(f"Error: could not map tensor name {orig_k}") - print(" Note: the arch parameter must be specified if the model is not llama") - sys.exit(1) + tname = name_map.get_name(k) + if tname is None: + print(f"Error: could not map tensor name {orig_k}") + print(" Note: the arch parameter must be specified if the model is not llama") + sys.exit(1) - if suffix == ".lora_A.weight": - tname += ".weight.loraA" - elif suffix == ".lora_B.weight": - tname += ".weight.loraB" - else: - assert False + if suffix == ".lora_A.weight": + tname += ".weight.loraA" + elif suffix == ".lora_B.weight": + tname += ".weight.loraB" + else: + assert False - print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB") - write_tensor_header(fout, tname, t.shape, t.dtype) - t.tofile(fout) + print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB") + write_tensor_header(fout, tname, t.shape, t.dtype) + t.tofile(fout) -print(f"Converted {input_json} and {input_model} to {output_path}") + print(f"Converted {input_json} and {input_model} to {output_path}") diff --git a/convert-persimmon-to-gguf.py b/convert-persimmon-to-gguf.py old mode 100644 new mode 100755 index 206b7d5ff..1ba5864dc --- a/convert-persimmon-to-gguf.py +++ b/convert-persimmon-to-gguf.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import torch import os from pprint import pprint diff --git a/requirements-hf-to-gguf.txt b/requirements-hf-to-gguf.txt deleted file mode 100644 index f4600539e..000000000 --- a/requirements-hf-to-gguf.txt +++ /dev/null @@ -1,3 +0,0 @@ --r requirements.txt -torch==2.1.1 -transformers==4.35.2 diff --git a/requirements.txt b/requirements.txt index 1a1162566..d36f74520 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,12 @@ -numpy==1.24.4 -sentencepiece==0.1.98 -transformers>=4.34.0 -gguf>=0.1.0 -protobuf>=4.21.0 +# These requirements include all dependencies for all top-level python scripts +# for llama.cpp. Avoid adding packages here directly. +# +# Package versions must stay compatible across all top-level python scripts. 
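The layout introduced here leans on pip's standard `-r` include mechanism: the umbrella file pulls in the per-script files, and relative paths are resolved against the including file. A rough standalone Python model of that expansion, for illustration only (pip implements this natively; the entry file name assumes this patch's layout):

    from pathlib import Path

    def expand(reqs: Path) -> list[str]:
        out: list[str] = []
        for raw in reqs.read_text().splitlines():
            line = raw.strip()
            if line.startswith("-r "):
                # follow a nested include, relative to the current file
                out += expand(reqs.parent / line[3:].strip())
            elif line and not line.startswith("#"):
                out.append(line)  # a concrete requirement specifier
        return out

    print(expand(Path("requirements.txt")))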
+#
+
+-r ./requirements/requirements-convert.txt
+
+-r ./requirements/requirements-convert-hf-to-gguf.txt
+-r ./requirements/requirements-convert-llama-ggml-to-gguf.txt
+-r ./requirements/requirements-convert-lora-to-ggml.txt
+-r ./requirements/requirements-convert-persimmon-to-gguf.txt
diff --git a/requirements/requirements-convert-hf-to-gguf.txt b/requirements/requirements-convert-hf-to-gguf.txt
new file mode 100644
index 000000000..6ac402610
--- /dev/null
+++ b/requirements/requirements-convert-hf-to-gguf.txt
@@ -0,0 +1,2 @@
+-r ./requirements-convert.txt
+torch~=2.1.1
diff --git a/requirements/requirements-convert-llama-ggml-to-gguf.txt b/requirements/requirements-convert-llama-ggml-to-gguf.txt
new file mode 100644
index 000000000..a0f37cd1c
--- /dev/null
+++ b/requirements/requirements-convert-llama-ggml-to-gguf.txt
@@ -0,0 +1 @@
+-r ./requirements-convert.txt
diff --git a/requirements/requirements-convert-lora-to-ggml.txt b/requirements/requirements-convert-lora-to-ggml.txt
new file mode 100644
index 000000000..6ac402610
--- /dev/null
+++ b/requirements/requirements-convert-lora-to-ggml.txt
@@ -0,0 +1,2 @@
+-r ./requirements-convert.txt
+torch~=2.1.1
diff --git a/requirements/requirements-convert-persimmon-to-gguf.txt b/requirements/requirements-convert-persimmon-to-gguf.txt
new file mode 100644
index 000000000..6ac402610
--- /dev/null
+++ b/requirements/requirements-convert-persimmon-to-gguf.txt
@@ -0,0 +1,2 @@
+-r ./requirements-convert.txt
+torch~=2.1.1
diff --git a/requirements/requirements-convert.txt b/requirements/requirements-convert.txt
new file mode 100644
index 000000000..a3d6ecec0
--- /dev/null
+++ b/requirements/requirements-convert.txt
@@ -0,0 +1,5 @@
+numpy~=1.24.4
+sentencepiece~=0.1.98
+transformers>=4.35.2,<5.0.0
+gguf>=0.1.0
+protobuf>=4.21.0,<5.0.0
diff --git a/scripts/check-requirements.sh b/scripts/check-requirements.sh
new file mode 100755
index 000000000..af7bab753
--- /dev/null
+++ b/scripts/check-requirements.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+set -euo pipefail
+
+#
+# check-requirements.sh checks all requirements files for each top-level
+# convert*.py script.
+#
+# WARNING: This is quite IO intensive, because a fresh venv is set up for every
+# python script. As of 2023-12-22, this writes ~2.7GB of data. An adequately
+# sized tmpfs /tmp or ramdisk is recommended if running this frequently.
+#
+# usage:    check-requirements.sh [<working_directory>]
+#           check-requirements.sh nocleanup [<working_directory>]
+#
+# where:
+#   - <working_directory> is a directory that can be used as the base for
+#       setting up the venvs. Defaults to `/tmp`.
+#   - 'nocleanup' as the first argument will disable automatic cleanup
+#       of the files created by this script.
+#
+# requires:
+#   - bash >= 3.2.57
+#   - shellcheck
+#
+# For each script, it creates a fresh venv, `pip install`s the requirements, and
+# finally imports the python script to check for `ImportError`.
+#
+
+log() {
+    local level=$1 msg=$2
+    printf >&2 '%s: %s\n' "$level" "$msg"
+}
+
+debug() {
+    log DEBUG "$@"
+}
+
+info() {
+    log INFO "$@"
+}
+
+fatal() {
+    log FATAL "$@"
+    exit 1
+}
+
+cleanup() {
+    if [[ -n ${workdir+x} && -d $workdir && -w $workdir ]]; then
+        info "Removing $workdir"
+        local count=0
+        rm -rfv -- "$workdir" | while read -r; do
+            if (( count++ > 750 )); then
+                printf .
+                count=0
+            fi
+        done
+        printf '\n'
+        info "Removed $workdir"
+    fi
+}
+
+do_cleanup=1
+if [[ ${1-} == nocleanup ]]; then
+    do_cleanup=0; shift
+fi
+
+if (( do_cleanup )); then
+    trap exit INT TERM
+    trap cleanup EXIT
+fi
+
+this=$(realpath -- "$0"); readonly this
+cd "$(dirname "$this")/.." 
# PWD should stay in llama.cpp project directory
+
+shellcheck "$this"
+
+readonly reqs_dir=requirements
+
+if [[ ${1+x} ]]; then
+    tmp_dir=$(realpath -- "$1")
+    if [[ ! ( -d $tmp_dir && -w $tmp_dir ) ]]; then
+        fatal "$tmp_dir is not a writable directory"
+    fi
+else
+    tmp_dir=/tmp
+fi
+
+workdir=$(mktemp -d "$tmp_dir/check-requirements.XXXX"); readonly workdir
+info "Working directory: $workdir"
+
+check_requirements() {
+    local reqs=$1
+
+    info "$reqs: beginning check"
+    pip --disable-pip-version-check install -qr "$reqs"
+    info "$reqs: OK"
+}
+
+check_convert_script() {
+    local py=$1             # e.g. ./convert-hf-to-gguf.py
+    local pyname=${py##*/}  # e.g. convert-hf-to-gguf.py
+    pyname=${pyname%.py}    # e.g. convert-hf-to-gguf
+
+    info "$py: beginning check"
+
+    local reqs="$reqs_dir/requirements-$pyname.txt"
+    if [[ ! -r $reqs ]]; then
+        fatal "$py missing requirements. Expected: $reqs"
+    fi
+
+    local venv="$workdir/$pyname-venv"
+    python3 -m venv "$venv"
+
+    (
+        # shellcheck source=/dev/null
+        source "$venv/bin/activate"
+
+        check_requirements "$reqs"
+
+        python - "$py" "$pyname" <<'EOF'
+import sys
+from importlib.machinery import SourceFileLoader
+py, pyname = sys.argv[1:]
+SourceFileLoader(pyname, py).load_module()
+EOF
+    )
+
+    if (( do_cleanup )); then
+        rm -rf -- "$venv"
+    fi
+
+    info "$py: imports OK"
+}
+
+readonly ignore_eq_eq='check_requirements: ignore "=="'
+
+for req in "$reqs_dir"/*; do
+    # Check that all sub-requirements are added to top-level requirements.txt
+    if ! grep -qF "$req" requirements.txt; then
+        fatal "$req needs to be added to requirements.txt"
+    fi
+
+    # Make sure exact release versions aren't being pinned in the requirements
+    # Filters out the ignore string
+    if grep -vF "$ignore_eq_eq" "$req" | grep -q '=='; then
+        tab=$'\t'
+        cat >&2 <<EOF
+FATAL: Avoid pinning exact package versions with '=='; use the compatible
+release operator '~=' instead. To suppress this error for a line, append:
+$tab# $ignore_eq_eq
+EOF
+        exit 1
+    fi
+done
+
+all_venv="$workdir/all-venv"
+python3 -m venv "$all_venv"
+
+(
+    # shellcheck source=/dev/null
+    source "$all_venv/bin/activate"
+
+    check_requirements requirements.txt
+)
+
+if (( do_cleanup )); then
+    rm -rf -- "$all_venv"
+fi
+
+check_convert_script ./convert.py
+for py in ./convert-*.py; do
+    check_convert_script "$py"
+done
+
+info 'Done! No issues found.'

From: hydai
Date: Sat, 30 Dec 2023 00:31:19 +0800
Subject: [PATCH 80/84] cuda: fix vmm oom issue on NVIDIA AGX Orin (#4687)

Signed-off-by: hydai
---
 ggml-cuda.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 9a9effcf5..09585b07d 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6662,7 +6662,7 @@ static void ggml_cuda_pool_free_leg(int device, void * ptr, size_t size) {
 // pool with virtual memory
 static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0};
 static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0};
-static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 36; // 64 GB
+static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB
 
 static void * ggml_cuda_pool_malloc_vmm(int device, size_t size, size_t * actual_size) {
     scoped_spin_lock lock(g_cuda_pool_lock);

From ce18d727a47f2473ca863a6f78bf3ad480008f72 Mon Sep 17 00:00:00 2001
From: Steward Garcia <57494570+FSSRepo@users.noreply.github.com>
Date: Fri, 29 Dec 2023 11:52:15 -0500
Subject: [PATCH 81/84] clip : enable gpu backend (#4205)

* clip: enable CUDA backend

* add missing kernels

* add enough padding for alignment

* remove ggml_repeat of clip.cpp

* add metal backend

* llava : fixes

- avoid ggml_repeat
- use GGML_USE_ instead of CLIP_USE_ macros
- remove unused vars

---------

Co-authored-by: Georgi Gerganov
---
 examples/llava/CMakeLists.txt |   3 +-
 examples/llava/clip.cpp       | 231 +++++++++++++++++++---------
 2 files changed, 131 insertions(+), 103 deletions(-)

diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt
index 48dae1506..2985caff8 100644
--- a/examples/llava/CMakeLists.txt
+++ b/examples/llava/CMakeLists.txt
@@ -24,7 +24,8 @@ endif()
 
 if (NOT MSVC)
     target_compile_options(llava PRIVATE 
-Wno-cast-qual) # stb_image.h - endif() +endif() + if(TARGET BUILD_INFO) add_dependencies(llava BUILD_INFO) endif() diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index f06ec400d..f9326a5cc 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -16,12 +16,19 @@ #include "clip.h" #include "ggml.h" #include "ggml-alloc.h" +#include "ggml-backend.h" + +#ifdef GGML_USE_CUBLAS +#include "ggml-cuda.h" +#endif + +#ifdef GGML_USE_METAL +#include "ggml-metal.h" +#endif #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" -#define CLIP_DEBUG - static std::string format(const char * fmt, ...) { va_list ap; va_list ap2; @@ -196,20 +203,6 @@ struct clip_vision_model { struct ggml_tensor * mm_2_b; }; -// Replacement for std::vector that doesn't require zero-initialization. -struct clip_buffer { - uint8_t * data = NULL; - size_t size = 0; - - void resize(size_t size) { - delete[] data; - data = new uint8_t[size]; - this->size = size; - } - - ~clip_buffer() { delete[] data; } -}; - struct clip_ctx { bool has_text_encoder = false; bool has_vision_encoder = false; @@ -223,9 +216,10 @@ struct clip_ctx { struct gguf_context * ctx_gguf; // memory buffers to evaluate the model - clip_buffer buf_compute; - clip_buffer buf_alloc; - ggml_allocr * alloc = NULL; + ggml_backend_buffer_t params_buffer = NULL; + ggml_backend_buffer_t compute_buffer = NULL; + ggml_backend_t backend = NULL; + ggml_allocr * compute_alloc = NULL; }; static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_image_f32_batch * imgs) { @@ -252,25 +246,20 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima if(ctx->has_llava_projector) { GGML_ASSERT(batch_size == 1); } - - const auto & buf_compute = ctx->buf_compute; - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ false, + /*.mem_size =*/ GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, }; - params.no_alloc = true; - struct ggml_context * ctx0 = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size, image_size, 3, batch_size); - ggml_allocr_alloc(ctx->alloc, inp_raw); + ggml_allocr_alloc(ctx->compute_alloc, inp_raw); - if (!ggml_allocr_is_measure(ctx->alloc)) { - float * data = (float *)ggml_get_data(inp_raw); + if (!ggml_allocr_is_measure(ctx->compute_alloc)) { + float * data = (float *)malloc(ggml_nbytes(inp_raw)); for (size_t i = 0; i < imgs->size; i++) { const int nx = imgs->data[i].nx; @@ -289,6 +278,8 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima } } } + ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw)); + free(data); } struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1); @@ -298,36 +289,39 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima // concat class_embeddings and patch_embeddings struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size); - ggml_allocr_alloc(ctx->alloc, embeddings); - if (!ggml_allocr_is_measure(ctx->alloc)) { - ggml_set_zero(embeddings); + ggml_allocr_alloc(ctx->compute_alloc, embeddings); + if (!ggml_allocr_is_measure(ctx->compute_alloc)) { + void* zero_mem = malloc(ggml_nbytes(embeddings)); + memset(zero_mem, 0, 
ggml_nbytes(embeddings)); + ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings)); + free(zero_mem); } - struct ggml_tensor * temp = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, 1, batch_size); - ggml_allocr_alloc(ctx->alloc, temp); + embeddings = ggml_acc(ctx0, embeddings, model.class_embedding, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0); - embeddings = ggml_acc(ctx0, embeddings, ggml_repeat(ctx0, model.class_embedding, temp), embeddings->nb[1], - embeddings->nb[2], embeddings->nb[3], 0); - embeddings = - ggml_acc(ctx0, embeddings, inp, embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]); + embeddings = ggml_acc(ctx0, embeddings, inp, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]); struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions); - ggml_allocr_alloc(ctx->alloc, positions); - if (!ggml_allocr_is_measure(ctx->alloc)) { + ggml_allocr_alloc(ctx->compute_alloc, positions); + if (!ggml_allocr_is_measure(ctx->compute_alloc)) { + int* positions_data = (int*)malloc(ggml_nbytes(positions)); for (int i = 0; i < num_positions; i++) { - ggml_set_i32_1d(positions, i, i); + positions_data[i] = i; } + ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); + free(positions_data); } embeddings = - ggml_add(ctx0, embeddings, ggml_repeat(ctx0, ggml_get_rows(ctx0, model.position_embeddings, positions), embeddings)); + ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions)); // pre-layernorm { embeddings = ggml_norm(ctx0, embeddings, eps); - embeddings = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.pre_ln_w, embeddings), embeddings), - ggml_repeat(ctx0, model.pre_ln_b, embeddings)); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b); } // loop over layers @@ -340,15 +334,15 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima { cur = ggml_norm(ctx0, cur, eps); - cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_1_w, cur), cur), - ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), + model.layers[il].ln_1_b); } // self-attention { struct ggml_tensor * Q = - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, cur), ggml_mul_mat(ctx0, model.layers[il].q_w, cur)); + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b); Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size); @@ -356,14 +350,14 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size); struct ggml_tensor * K = - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].k_b, cur), ggml_mul_mat(ctx0, model.layers[il].k_w, cur)); + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b); K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size); K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size); struct ggml_tensor * V = - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].v_b, cur), ggml_mul_mat(ctx0, model.layers[il].v_w, cur)); + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b); V = ggml_reshape_4d(ctx0, V, d_head, 
n_head, num_positions, batch_size); V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); @@ -379,7 +373,7 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima } // attention output - cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].o_b, cur), ggml_mul_mat(ctx0, model.layers[il].o_w, cur)); + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b); // re-add the layer input, e.g., residual cur = ggml_add(ctx0, cur, embeddings); @@ -390,12 +384,11 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima { cur = ggml_norm(ctx0, cur, eps); - cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_2_w, cur), cur), - ggml_repeat(ctx0, model.layers[il].ln_2_b, cur)); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b); } cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); - cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].ff_i_b, cur), cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b); if (ctx->use_gelu) { cur = ggml_gelu_inplace(ctx0, cur); @@ -404,7 +397,7 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima } cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); - cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].ff_o_b, cur), cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b); // residual 2 cur = ggml_add(ctx0, embeddings, cur); @@ -417,23 +410,26 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]); struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches); - ggml_allocr_alloc(ctx->alloc, patches); - if (!ggml_allocr_is_measure(ctx->alloc)) { - for (int i = 0; i < num_patches; ++i) { - ggml_set_i32_1d(patches, i, i+1); + ggml_allocr_alloc(ctx->compute_alloc, patches); + if (!ggml_allocr_is_measure(ctx->compute_alloc)) { + int* patches_data = (int*)malloc(ggml_nbytes(patches)); + for (int i = 0; i < num_positions; i++) { + patches_data[i] = i + 1; } + ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); + free(patches_data); } embeddings = ggml_get_rows(ctx0, embeddings, patches); // mm projection 0 embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); - embeddings = ggml_add(ctx0, ggml_repeat(ctx0, model.mm_0_b, embeddings), embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); embeddings = ggml_gelu(ctx0, embeddings); embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings); - embeddings = ggml_add(ctx0, ggml_repeat(ctx0, model.mm_2_b, embeddings), embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_2_b); } // build the graph @@ -446,7 +442,6 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima // read and create ggml_context containing the tensors and their data struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { - struct ggml_context * meta = NULL; struct gguf_init_params params = { @@ -479,7 +474,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { printf("%s: ftype: %s\n", __func__, ftype_str.c_str()); printf("\n"); } - + const int n_tensors = gguf_get_n_tensors(ctx); // kv if (verbosity >= 3) { const int n_kv = gguf_get_n_kv(ctx); @@ -493,27 +488,38 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } // data - size_t ctx_size = 0; + size_t 
buffer_size = 0;
     {
-        const int n_tensors = gguf_get_n_tensors(ctx);
-
         for (int i = 0; i < n_tensors; ++i) {
             const char * name = gguf_get_tensor_name(ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
 
             struct ggml_tensor * cur = ggml_get_tensor(meta, name);
-            ctx_size += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
             size_t tensor_size = ggml_nbytes(cur);
-            size_t padded_size = ggml_nbytes_pad(cur);
-            ctx_size += padded_size;
+            buffer_size += tensor_size;
             if (verbosity >= 3) {
-                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, padded_size=%zu, offset=%zu\n", __func__, i,
-                       ggml_n_dims(cur), cur->name, tensor_size, padded_size, offset);
+                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu\n", __func__, i,
+                       ggml_n_dims(cur), cur->name, tensor_size, offset);
             }
         }
     }
 
+    buffer_size += n_tensors * 128 /* CLIP PADDING */;
+
     clip_ctx * new_clip = new clip_ctx;
+
+#ifdef GGML_USE_CUBLAS
+    new_clip->backend = ggml_backend_cuda_init(0);
+    printf("%s: CLIP using CUDA backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_METAL
+    new_clip->backend = ggml_backend_metal_init();
+    printf("%s: CLIP using Metal backend\n", __func__);
+#endif
+
+    if (!new_clip->backend) {
+        new_clip->backend = ggml_backend_cpu_init();
+        printf("%s: CLIP using CPU backend\n", __func__);
+    }
 
     // model size and capabilities
     {
@@ -539,17 +545,20 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             printf("%s: text_encoder:    %d\n", __func__, new_clip->has_text_encoder);
             printf("%s: vision_encoder:  %d\n", __func__, new_clip->has_vision_encoder);
             printf("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
-            printf("%s: model size:      %.2f MB\n", __func__, (ctx_size / 1024.0 / 1024.0));
+            printf("%s: model size:      %.2f MB\n", __func__, buffer_size / 1024.0 / 1024.0);
             printf("%s: metadata size:   %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
         }
     }
 
+    printf("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, buffer_size / (1024.0 * 1024.0), n_tensors);
+
     // load tensors
     {
+        std::vector<uint8_t> read_buf;
         struct ggml_init_params params = {
-            /*.mem_size =*/ ctx_size,
+            /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
-            /*.no_alloc =*/ false,
+            /*.no_alloc =*/ true,
         };
 
         new_clip->ctx = ggml_init(params);
@@ -566,13 +575,21 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             return nullptr;
         }
 
-        const int n_tensors = gguf_get_n_tensors(ctx);
+        // add tensors to context
        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name(ctx, i);
            struct ggml_tensor * t = ggml_get_tensor(meta, name);
            struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx, t);
            ggml_set_name(cur, name);
+        }
 
+        // alloc memory and offload data
+        new_clip->params_buffer = ggml_backend_alloc_buffer(new_clip->backend, buffer_size);
+        ggml_allocr* alloc = ggml_allocr_new_from_buffer(new_clip->params_buffer);
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name = gguf_get_tensor_name(ctx, i);
+            struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx, name);
+            ggml_allocr_alloc(alloc, cur);
             const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
             fin.seekg(offset, std::ios::beg);
             if (!fin) {
@@ -580,10 +597,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
                 clip_free(new_clip);
                 return nullptr;
             }
-
-            fin.read(reinterpret_cast<char *>(cur->data), ggml_nbytes(t));
+            int num_bytes = ggml_nbytes(cur);
+            if 
(ggml_backend_is_cpu(new_clip->backend)
+#ifdef GGML_USE_METAL
+            || ggml_backend_is_metal(new_clip->backend)
+#endif
+            ) {
+                // for the CPU and Metal backend, we can read directly into the tensor
+                fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
+            } else {
+                // read into a temporary buffer first, then copy to device memory
+                read_buf.resize(num_bytes);
+                fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
+                ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
+            }
         }
-
+        ggml_allocr_free(alloc);
         fin.close();
     }
@@ -657,18 +686,16 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 
     // measure mem requirement and allocate
     {
-        static const size_t tensor_alignment = 32;
-        new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());
-        new_clip->alloc = ggml_allocr_new_measure(tensor_alignment);
+        new_clip->compute_alloc = ggml_allocr_new_measure_from_backend(new_clip->backend);
         clip_image_f32_batch batch;
         batch.size = 1;
         ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch);
-        size_t alloc_size = ggml_allocr_alloc_graph(new_clip->alloc, gf) + tensor_alignment;
-        ggml_allocr_free(new_clip->alloc);
-        new_clip->buf_alloc.resize(alloc_size);
-        new_clip->alloc = ggml_allocr_new(new_clip->buf_alloc.data, new_clip->buf_alloc.size, tensor_alignment);
+        size_t compute_memory_buffer_size = ggml_allocr_alloc_graph(new_clip->compute_alloc, gf);
+        ggml_allocr_free(new_clip->compute_alloc);
+        new_clip->compute_buffer = ggml_backend_alloc_buffer(new_clip->backend, compute_memory_buffer_size);
+        new_clip->compute_alloc = ggml_allocr_new_from_buffer(new_clip->compute_buffer);
 
-        printf("%s: total allocated memory: %.2f MB\n", __func__, (new_clip->buf_compute.size + alloc_size)/1024.0/1024.0);
+        printf("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0);
     }
 
     return new_clip;
@@ -852,29 +879,29 @@ bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const cl
     }
 
     // reset alloc buffer to clean the memory from previous invocations
-    ggml_allocr_reset(ctx->alloc);
+    ggml_allocr_reset(ctx->compute_alloc);
 
     // build the inference graph
     ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
-    ggml_allocr_alloc_graph(ctx->alloc, gf);
+    ggml_allocr_alloc_graph(ctx->compute_alloc, gf);
 
-    struct ggml_cplan plan = ggml_graph_plan(gf, n_threads);
-    if (plan.work_size > 0) {
-        plan.work_data = (uint8_t *)malloc(plan.work_size);
+    if (ggml_backend_is_cpu(ctx->backend)) {
+        ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
     }
 
-    ggml_graph_compute(gf, &plan);
+#ifdef GGML_USE_METAL
+    if (ggml_backend_is_metal(ctx->backend)) {
+        ggml_backend_metal_set_n_cb(ctx->backend, n_threads);
+    }
+#endif
+
+    ggml_backend_graph_compute(ctx->backend, gf);
 
     // the last node is the embedding tensor
-struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1];
+    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1];
 
     // copy the embeddings to the location passed by the user
-    memcpy(vec, ggml_get_data_f32(embeddings), ggml_nbytes(embeddings));
-
-    if (plan.work_size > 0) {
-        free(plan.work_data);
-    }
-
+    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
     return true;
 }
 
@@ -1045,8 +1072,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
     gguf_free(ctx_out);
 
     {
-        
printf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
-        printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
+        printf("%s: original size  = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
+        printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
 
         int64_t sum_all = 0;
         for (size_t i = 0; i < hist_all.size(); ++i) {

From 0235b9b571f3cc7d2b8836409a5404b41ce1379c Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 29 Dec 2023 18:53:34 +0200
Subject: [PATCH 82/84] clip : use ggml_backend_buffer_is_host (#4205)

---
 examples/llava/clip.cpp | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index f9326a5cc..6a731eeec 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -598,11 +598,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             return nullptr;
         }
         int num_bytes = ggml_nbytes(cur);
-        if (ggml_backend_is_cpu(new_clip->backend)
-#ifdef GGML_USE_METAL
-            || ggml_backend_is_metal(new_clip->backend)
-#endif
-            ) {
+        if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
             // for the CPU and Metal backend, we can read directly into the tensor
             fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
         } else {

From a20f3c7465d6d1b33767757c2760643b799a81bf Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Fri, 29 Dec 2023 23:12:53 +0100
Subject: [PATCH 83/84] CUDA: fix tensor core logic for Pascal and HIP (#4682)

---
 ggml-cuda.cu | 72 ++++++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 33 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 09585b07d..71a64ca09 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -123,24 +123,6 @@
 
 #define GGML_CUDA_MAX_NODES 8192
 
-// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
-// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
-// for large computational tasks. 
the drawback is that this requires some extra amount of VRAM: +// - 7B quantum model: +100-200 MB +// - 13B quantum model: +200-400 MB +// +//#define GGML_CUDA_FORCE_MMQ + +// TODO: improve this to be correct for more hardware +// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores +#if !defined(GGML_CUDA_FORCE_MMQ) && (!defined(GGML_USE_HIPBLAS) || defined(RDNA3)) +#define CUDA_USE_TENSOR_CORES +#endif + +// max batch size to use MMQ kernels when tensor cores are available +#define MMQ_MAX_BATCH_SIZE 32 + #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif @@ -8661,11 +8660,26 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 } } -#ifdef CUDA_USE_TENSOR_CORES - const bool use_tensor_cores = true; +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + const bool fp16_performance_good = true; + +#ifdef RDNA3 + const bool use_mul_mat_q = false; #else - const bool use_tensor_cores = false; -#endif + const bool use_mul_mat_q = true; +#endif // RDNA3 + +#else + + const bool fp16_performance_good = min_compute_capability >= CC_VOLTA; + bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type); +#ifdef CUDA_USE_TENSOR_CORES + // when tensor cores are available, use them for large batch size + // ref: https://github.com/ggerganov/llama.cpp/pull/3776 + use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && src1->ne[1] > MMQ_MAX_BATCH_SIZE); +#endif // CUDA_USE_TENSOR_CORES + +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) // debug helpers //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]); @@ -8675,13 +8689,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); - if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { + if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { // KQ single-batch ggml_cuda_mul_mat_vec_p021(src0, src1, dst); - } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { + } else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { + } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { @@ -8701,14 +8715,6 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 ggml_cuda_op_mul_mat(src0, src1, dst, 
ggml_cuda_op_dequantize_mul_mat_vec, false);
         }
     } else {
-        bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
-
-        // when tensor cores are available, use them for large batch size
-        // ref: https://github.com/ggerganov/llama.cpp/pull/3776
-        if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
-            use_mul_mat_q = false;
-        }
-
         if (use_mul_mat_q) {
             ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true);
         } else {

From 24a447e20af425fa44cf10feaa632b6bb596c80f Mon Sep 17 00:00:00 2001
From: automaticcat
Date: Sat, 30 Dec 2023 15:07:48 +0700
Subject: [PATCH 84/84] ggml : add ggml_cpu_has_avx_vnni() (#4589)

* feat: add avx_vnni based on intel documents

* ggml: add avx vnni based on intel document

* llama: add avx vnni information display

* docs: add more details about using oneMKL and oneAPI for intel processors

* docs: add more details about using oneMKL and oneAPI for intel processors

* docs: add more details about using oneMKL and oneAPI for intel processors

* docs: add more details about using oneMKL and oneAPI for intel processors

* docs: add more details about using oneMKL and oneAPI for intel processors

* Update ggml.c

Fix indentation update

Co-authored-by: Georgi Gerganov

---------

Co-authored-by: Georgi Gerganov
---
 README.md         | 30 ++++++++++++++++++++++--------
 common/common.cpp |  1 +
 ggml.c            |  8 ++++++++
 ggml.h            |  1 +
 llama.cpp         |  1 +
 5 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 48dcd6464..ca6d14e17 100644
--- a/README.md
+++ b/README.md
@@ -385,16 +385,30 @@ Building the program with BLAS support may lead to some performance improvements
 
   Check [BLIS.md](docs/BLIS.md) for more information.
 
-- #### Intel MKL
+- #### Intel oneMKL
+  - Using manual oneAPI installation:
+    By default, `LLAMA_BLAS_VENDOR` is set to `Generic`, so if you have already sourced the Intel environment script and assign `-DLLAMA_BLAS=ON` in cmake, the MKL version of BLAS will automatically be selected. Otherwise, please install oneAPI and follow the steps below:
+    ```bash
+    mkdir build
+    cd build
+    source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-runtime docker image, only required for manual installation
+    cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON
+    cmake --build . --config Release
+    ```
 
-  By default, `LLAMA_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DLLAMA_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. You may also specify it by:
+  - Using oneAPI docker image:
+    If you do not want to source the environment variables and install oneAPI manually, you can also build the code using the Intel docker container: [oneAPI-runtime](https://hub.docker.com/r/intel/oneapi-runtime)
 
-  ```bash
-  mkdir build
-  cd build
-  cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
-  cmake --build . --config Release
-  ```
+    ```bash
+    mkdir build
+    cd build
+    cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON
+    cmake --build . --config Release
+    ```
+
+  Building through oneAPI compilers will make the avx_vnni instruction set available for Intel processors that do not support avx512 and avx512_vnni. 
+ + Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. - #### cuBLAS diff --git a/common/common.cpp b/common/common.cpp index b3425ab09..eacaee18e 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1394,6 +1394,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "build_number: %d\n", LLAMA_BUILD_NUMBER); fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false"); fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false"); + fprintf(stream, "cpu_has_avx_vnni: %s\n", ggml_cpu_has_avx_vnni() ? "true" : "false"); fprintf(stream, "cpu_has_avx2: %s\n", ggml_cpu_has_avx2() ? "true" : "false"); fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false"); fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false"); diff --git a/ggml.c b/ggml.c index a9e1ea9b4..bcec200f6 100644 --- a/ggml.c +++ b/ggml.c @@ -19638,6 +19638,14 @@ int ggml_cpu_has_avx(void) { #endif } +int ggml_cpu_has_avx_vnni(void) { +#if defined(__AVXVNNI__) + return 1; +#else + return 0; +#endif +} + int ggml_cpu_has_avx2(void) { #if defined(__AVX2__) return 1; diff --git a/ggml.h b/ggml.h index 67d6bc4f1..64f4e45e8 100644 --- a/ggml.h +++ b/ggml.h @@ -2198,6 +2198,7 @@ extern "C" { // GGML_API int ggml_cpu_has_avx (void); + GGML_API int ggml_cpu_has_avx_vnni (void); GGML_API int ggml_cpu_has_avx2 (void); GGML_API int ggml_cpu_has_avx512 (void); GGML_API int ggml_cpu_has_avx512_vbmi(void); diff --git a/llama.cpp b/llama.cpp index 68c7cced6..a833d4c15 100644 --- a/llama.cpp +++ b/llama.cpp @@ -10780,6 +10780,7 @@ const char * llama_print_system_info(void) { s = ""; s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; + s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | "; s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
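A closing note on the new capability flag: ggml_cpu_has_avx_vnni() is a compile-time check on the `__AVXVNNI__` macro, so it reports how the binary was built rather than what the host CPU supports. On Linux, the host capability itself can be probed at run time; a minimal Python sketch (not part of the patch, Linux-only):

    def cpu_has_avx_vnni() -> bool:
        # "avx_vnni" appears in the flags line of /proc/cpuinfo on supporting CPUs
        try:
            with open("/proc/cpuinfo") as f:
                for line in f:
                    if line.startswith("flags"):
                        return "avx_vnni" in line.split()
        except OSError:
            pass
        return False

    print("AVX_VNNI =", int(cpu_has_avx_vnni()))

The printed value corresponds to the new `AVX_VNNI = ...` entry emitted by llama_print_system_info() above.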