From 3ab3eca372bf617a99d11484fb00298ce74d4a8b Mon Sep 17 00:00:00 2001 From: Wenjing Yu Date: Fri, 26 Jul 2024 18:10:20 -0700 Subject: [PATCH] abstract rpc server --- .editorconfig | 5 - .gitignore | 22 +- CMakeLists.txt | 4 +- Makefile | 12 +- convert_hf_to_gguf.py | 3689 ----------------- convert_hf_to_gguf_update.py | 351 -- convert_llama_ggml_to_gguf.py | 450 -- convert_lora_to_gguf.py | 393 -- {examples => core}/CMakeLists.txt | 4 +- .../deprecation-warning/README.md | 0 .../deprecation-warning.cpp | 0 {examples => core}/main-cmake-pkg/.gitignore | 0 .../main-cmake-pkg/CMakeLists.txt | 0 {examples => core}/main-cmake-pkg/README.md | 0 {examples => core}/main/CMakeLists.txt | 0 {examples => core}/main/README.md | 0 {examples => core}/main/main.cpp | 4 +- {examples => core}/rpc/CMakeLists.txt | 0 {examples => core}/rpc/README.md | 0 {examples => core}/rpc/rpc-server.cpp | 0 {examples => core}/sycl/CMakeLists.txt | 0 {examples => core}/sycl/README.md | 0 {examples => core}/sycl/build.sh | 0 {examples => core}/sycl/ls-sycl-device.cpp | 0 {examples => core}/sycl/run-llama2.sh | 0 {examples => core}/sycl/win-build-sycl.bat | 0 {examples => core}/sycl/win-run-llama2.bat | 0 docs/android.md | 56 - docs/backend/BLIS.md | 67 - docs/backend/SYCL.md | 580 --- docs/build.md | 340 -- docs/development/HOWTO-add-model.md | 119 - docs/development/debugging-tests.md | 104 - docs/development/llama-star/idea-arch.key | Bin 488591 -> 0 bytes docs/development/llama-star/idea-arch.pdf | Bin 42334 -> 0 bytes .../token_generation_performance_tips.md | 40 - docs/docker.md | 86 - docs/install.md | 39 - examples/Miku.sh | 50 - examples/base-translate.sh | 61 - examples/chat-13B.bat | 57 - examples/chat-13B.sh | 41 - examples/chat-persistent.sh | 151 - examples/chat-vicuna.sh | 41 - examples/chat.sh | 16 - examples/convert_legacy_llama.py | 1440 ------- examples/json_schema_pydantic_example.py | 82 - examples/json_schema_to_grammar.py | 811 ---- examples/llama.vim | 135 - examples/llm.vim | 28 - examples/pydantic_models_to_grammar.py | 1322 ------ .../pydantic_models_to_grammar_examples.py | 312 -- examples/reason-act.sh | 16 - examples/regex_to_grammar.py | 20 - examples/server-llama2-13B.sh | 26 - examples/server_embd.py | 35 - examples/ts-type-to-grammar.sh | 28 - include/llama.h | 2 +- src/llama.cpp | 16 +- 59 files changed, 31 insertions(+), 11024 deletions(-) delete mode 100755 convert_hf_to_gguf.py delete mode 100755 convert_hf_to_gguf_update.py delete mode 100755 convert_llama_ggml_to_gguf.py delete mode 100755 convert_lora_to_gguf.py rename {examples => core}/CMakeLists.txt (82%) rename {examples => core}/deprecation-warning/README.md (100%) rename {examples => core}/deprecation-warning/deprecation-warning.cpp (100%) rename {examples => core}/main-cmake-pkg/.gitignore (100%) rename {examples => core}/main-cmake-pkg/CMakeLists.txt (100%) rename {examples => core}/main-cmake-pkg/README.md (100%) rename {examples => core}/main/CMakeLists.txt (100%) rename {examples => core}/main/README.md (100%) rename {examples => core}/main/main.cpp (99%) rename {examples => core}/rpc/CMakeLists.txt (100%) rename {examples => core}/rpc/README.md (100%) rename {examples => core}/rpc/rpc-server.cpp (100%) rename {examples => core}/sycl/CMakeLists.txt (100%) rename {examples => core}/sycl/README.md (100%) rename {examples => core}/sycl/build.sh (100%) rename {examples => core}/sycl/ls-sycl-device.cpp (100%) rename {examples => core}/sycl/run-llama2.sh (100%) rename {examples => core}/sycl/win-build-sycl.bat (100%) rename 
{examples => core}/sycl/win-run-llama2.bat (100%) delete mode 100644 docs/android.md delete mode 100644 docs/backend/BLIS.md delete mode 100644 docs/backend/SYCL.md delete mode 100644 docs/build.md delete mode 100644 docs/development/HOWTO-add-model.md delete mode 100644 docs/development/debugging-tests.md delete mode 100755 docs/development/llama-star/idea-arch.key delete mode 100644 docs/development/llama-star/idea-arch.pdf delete mode 100644 docs/development/token_generation_performance_tips.md delete mode 100644 docs/docker.md delete mode 100644 docs/install.md delete mode 100755 examples/Miku.sh delete mode 100755 examples/base-translate.sh delete mode 100644 examples/chat-13B.bat delete mode 100755 examples/chat-13B.sh delete mode 100755 examples/chat-persistent.sh delete mode 100755 examples/chat-vicuna.sh delete mode 100755 examples/chat.sh delete mode 100755 examples/convert_legacy_llama.py delete mode 100644 examples/json_schema_pydantic_example.py delete mode 100755 examples/json_schema_to_grammar.py delete mode 100644 examples/llama.vim delete mode 100644 examples/llm.vim delete mode 100644 examples/pydantic_models_to_grammar.py delete mode 100755 examples/pydantic_models_to_grammar_examples.py delete mode 100755 examples/reason-act.sh delete mode 100644 examples/regex_to_grammar.py delete mode 100755 examples/server-llama2-13B.sh delete mode 100644 examples/server_embd.py delete mode 100755 examples/ts-type-to-grammar.sh diff --git a/.editorconfig b/.editorconfig index 1a8840f9b..2e108a7df 100644 --- a/.editorconfig +++ b/.editorconfig @@ -21,9 +21,4 @@ indent_style = tab [prompts/*.txt] insert_final_newline = unset -[examples/server/public/*] -indent_size = 2 - -[examples/llama.swiftui/llama.swiftui.xcodeproj/*] -indent_style = tab diff --git a/.gitignore b/.gitignore index b5a055955..cfaf40397 100644 --- a/.gitignore +++ b/.gitignore @@ -87,19 +87,19 @@ ppl-*.txt qnt-*.txt perf-*.txt -# Examples +# core -examples/jeopardy/results.txt -examples/server/*.css.hpp -examples/server/*.html.hpp -examples/server/*.js.hpp -examples/server/*.mjs.hpp +core/jeopardy/results.txt +core/server/*.css.hpp +core/server/*.html.hpp +core/server/*.js.hpp +core/server/*.mjs.hpp !build_64.sh -!examples/*.bat -!examples/*/*.kts -!examples/*/*/*.kts -!examples/sycl/*.bat -!examples/sycl/*.sh +!core/*.bat +!core/*/*.kts +!core/*/*/*.kts +!core/sycl/*.bat +!core/sycl/*.sh # Python diff --git a/CMakeLists.txt b/CMakeLists.txt index 4c829518f..f5d231229 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -183,12 +183,12 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc" DESTINATION lib/pkgconfig) # -# programs, examples +# programs, core # add_subdirectory(common) if (LLAMA_BUILD_EXAMPLES) - add_subdirectory(examples) + add_subdirectory(core) add_subdirectory(pocs) endif() diff --git a/Makefile b/Makefile index bd0ee5589..5058be94d 100644 --- a/Makefile +++ b/Makefile @@ -1065,7 +1065,7 @@ $(LIB_COMMON_S): \ clean: rm -vrf *.dot $(BUILD_TARGETS) rm -rvf src/*.o - rm -rvf examples/*.o + rm -rvf core/*.o rm -rvf common/*.o rm -rvf *.a rm -rvf *.dll @@ -1082,10 +1082,10 @@ clean: rm -rvf $(BUILD_TARGETS) rm -f vulkan-shaders-gen ggml/src/ggml-vulkan-shaders.hpp ggml/src/ggml-vulkan-shaders.cpp rm -rvf $(LEGACY_TARGETS_CLEAN) - find examples pocs -type f -name "*.o" -delete + find core pocs -type f -name "*.o" -delete # -# Examples +# core # # $< is the first prerequisite, i.e. the source file. 
@@ -1095,7 +1095,7 @@ clean: # Helper function that replaces .c, .cpp, and .cu file endings with .o: GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1)))) -llama-cli: examples/main/main.cpp \ +llama-cli: core/main/main.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1104,7 +1104,7 @@ llama-cli: examples/main/main.cpp \ @echo ifdef GGML_RPC -rpc-server: examples/rpc/rpc-server.cpp \ +rpc-server: core/rpc/rpc-server.cpp \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) endif # GGML_RPC @@ -1142,7 +1142,7 @@ llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ # NOTE: We currently will always build the deprecation-warning `main` and `server` binaries to help users migrate. # Eventually we will want to remove these target from building all the time. -main: examples/deprecation-warning/deprecation-warning.cpp +main: core/deprecation-warning/deprecation-warning.cpp $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @echo "NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead." diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py deleted file mode 100755 index 4087187c1..000000000 --- a/convert_hf_to_gguf.py +++ /dev/null @@ -1,3689 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -from __future__ import annotations - -import logging -import argparse -import contextlib -import json -import os -import re -import sys -from enum import IntEnum -from pathlib import Path -from hashlib import sha256 -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast - -import math -import numpy as np -import torch - -if TYPE_CHECKING: - from torch import Tensor - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -logger = logging.getLogger("hf-to-gguf") - - -###### MODEL DEFINITIONS ###### - -class SentencePieceTokenTypes(IntEnum): - NORMAL = 1 - UNKNOWN = 2 - CONTROL = 3 - USER_DEFINED = 4 - UNUSED = 5 - BYTE = 6 - - -AnyModel = TypeVar("AnyModel", bound="type[Model]") - - -class Model: - _model_classes: dict[str, type[Model]] = {} - - dir_model: Path - ftype: gguf.LlamaFileType - fname_out: Path - is_big_endian: bool - endianess: gguf.GGUFEndian - use_temp_file: bool - lazy: bool - part_names: list[str] - is_safetensors: bool - hparams: dict[str, Any] - block_count: int - tensor_map: gguf.TensorNameMap - tensor_names: set[str] | None - gguf_writer: gguf.GGUFWriter - model_name: str | None - metadata_override: Path | None - dir_model_card: Path - - # subclasses should define this! 
- model_arch: gguf.MODEL_ARCH - - def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool = False, - use_temp_file: bool = False, eager: bool = False, - metadata_override: Path | None = None, model_name: str | None = None, - split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False): - if type(self) is Model: - raise TypeError(f"{type(self).__name__!r} should not be directly instantiated") - - self.dir_model = dir_model - self.ftype = ftype - self.fname_out = fname_out - self.is_big_endian = is_big_endian - self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE - self.use_temp_file = use_temp_file - self.lazy = not eager - self.part_names = Model.get_model_part_names(self.dir_model, "model", ".safetensors") - self.is_safetensors = len(self.part_names) > 0 - if not self.is_safetensors: - self.part_names = Model.get_model_part_names(self.dir_model, "pytorch_model", ".bin") - self.hparams = Model.load_hparams(self.dir_model) - self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"]) - self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) - self.tensor_names = None - self.metadata_override = metadata_override - self.model_name = model_name - self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py - - # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type - if self.ftype == gguf.LlamaFileType.GUESSED: - # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. - _, first_tensor = next(self.get_tensors()) - if first_tensor.dtype == torch.float16: - logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_F16 - else: - logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_BF16 - - # Configure GGUF Writer - self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, - split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard) - - @classmethod - def __init_subclass__(cls): - # can't use an abstract property, because overriding it without type errors - # would require using decorated functions instead of simply defining the property - if "model_arch" not in cls.__dict__: - raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}") - - def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: - key = next((k for k in keys if k in self.hparams), None) - if key is not None: - return self.hparams[key] - if optional: - return None - raise KeyError(f"could not find any of: {keys}") - - def set_vocab(self): - self._set_vocab_gpt2() - - def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - tensor_names_from_parts: set[str] = set() - - if len(self.part_names) > 1: - self.tensor_names = set() - index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin" - index_name += ".index.json" - logger.info(f"gguf: loading model weight map from '{index_name}'") - with open(self.dir_model / index_name, "r", encoding="utf-8") as f: - index: dict[str, Any] = json.load(f) - weight_map = index.get("weight_map") - if weight_map is None or not isinstance(weight_map, dict): - raise ValueError(f"Can't load 'weight_map' from 
{index_name!r}") - self.tensor_names.update(weight_map.keys()) - else: - self.tensor_names = tensor_names_from_parts - - for part_name in self.part_names: - logger.info(f"gguf: loading model part '{part_name}'") - ctx: ContextManager[Any] - if self.is_safetensors: - from safetensors import safe_open - ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu")) - else: - ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) - - with ctx as model_part: - tensor_names_from_parts.update(model_part.keys()) - - for name in model_part.keys(): - if self.is_safetensors: - if self.lazy: - data = model_part.get_slice(name) - data = LazyTorchTensor.from_safetensors_slice(data) - else: - data = model_part.get_tensor(name) - else: - data = model_part[name] - if self.lazy: - data = LazyTorchTensor.from_eager(data) - yield name, data - - # only verify tensor name presence; it doesn't matter if they are not in the right files - if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0: - raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}") - - def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: - if key not in gguf.MODEL_TENSORS[self.model_arch]: - raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}") - name: str = gguf.TENSOR_NAMES[key] - if "{bid}" in name: - assert bid is not None - name = name.format(bid=bid) - return name + suffix - - def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool: - if key not in gguf.MODEL_TENSORS[self.model_arch]: - return False - key_name: str = gguf.TENSOR_NAMES[key] - if "{bid}" in key_name: - if bid is None: - return False - key_name = key_name.format(bid=bid) - else: - if bid is not None: - return False - return name == (key_name + suffix) - - def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str: - new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - return new_name - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.block_count) - - if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None: - self.gguf_writer.add_context_length(n_ctx) - logger.info(f"gguf: context length = {n_ctx}") - - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - self.gguf_writer.add_embedding_length(n_embd) - logger.info(f"gguf: embedding length = {n_embd}") - - if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None: - self.gguf_writer.add_feed_forward_length(n_ff) - logger.info(f"gguf: feed forward length = {n_ff}") - - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - self.gguf_writer.add_head_count(n_head) - logger.info(f"gguf: head count = {n_head}") - - if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None: - self.gguf_writer.add_head_count_kv(n_head_kv) - logger.info(f"gguf: key-value head count = {n_head_kv}") - - if (rope_theta := self.hparams.get("rope_theta")) is not None: - self.gguf_writer.add_rope_freq_base(rope_theta) - logger.info(f"gguf: rope theta = {rope_theta}") - if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None: - self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps) - 
logger.info(f"gguf: rms norm epsilon = {f_rms_eps}") - if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None: - self.gguf_writer.add_layer_norm_eps(f_norm_eps) - logger.info(f"gguf: layer norm epsilon = {f_norm_eps}") - if (n_experts := self.hparams.get("num_local_experts")) is not None: - self.gguf_writer.add_expert_count(n_experts) - logger.info(f"gguf: expert count = {n_experts}") - if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None: - self.gguf_writer.add_expert_used_count(n_experts_used) - logger.info(f"gguf: experts used count = {n_experts_used}") - - if (head_dim := self.hparams.get("head_dim")) is not None: - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) - - self.gguf_writer.add_file_type(self.ftype) - logger.info(f"gguf: file type = {self.ftype}") - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - return [(self.map_tensor_name(name), data_torch)] - - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid, n_dims # unused - - return False - - def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid, n_dims # unused - - return False - - def prepare_tensors(self): - max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") - - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # use the first number-like part of the tensor name as the block id - bid = None - for part in name.split("."): - if part.isdecimal(): - bid = int(part) - break - - for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)): - data: np.ndarray # type hint - n_dims = len(data.shape) - data_dtype = data.dtype - data_qtype: gguf.GGMLQuantizationType | None = None - - # when both are True, f32 should win - extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims) - extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims) - - # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors - # Conditions should closely match those in llama_model_quantize_internal in llama.cpp - extra_f32 = any(cond for cond in ( - extra_f32, - n_dims == 1, - new_name.endswith("_norm.weight"), - )) - - # Some tensor types are always in float32 - extra_f32 = extra_f32 or any(self.match_model_tensor_name(new_name, key, bid) for key in ( - gguf.MODEL_TENSOR.FFN_GATE_INP, - gguf.MODEL_TENSOR.POS_EMBD, - gguf.MODEL_TENSOR.TOKEN_TYPES, - )) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - extra_f16 = any(cond for cond in ( - extra_f16, - (name.endswith(".weight") and n_dims >= 2), - )) - - if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32: - if self.ftype == gguf.LlamaFileType.MOSTLY_BF16: - data = gguf.quantize_bf16(data) - assert data.dtype == np.int16 - data_qtype = gguf.GGMLQuantizationType.BF16 - - elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data): - data = gguf.quantize_q8_0(data) - assert data.dtype 
== np.uint8 - data_qtype = gguf.GGMLQuantizationType.Q8_0 - - else: # default to float16 for quantized tensors - if data_dtype != np.float16: - data = data.astype(np.float16) - data_qtype = gguf.GGMLQuantizationType.F16 - - if data_qtype is None: # by default, convert to float32 - if data_dtype != np.float32: - data = data.astype(np.float32) - data_qtype = gguf.GGMLQuantizationType.F32 - - shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape - - # reverse shape to make it similar to the internal ggml dimension order - shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}" - - # n_dims is implicit in the shape - logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}") - - self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype) - - def set_type(self): - self.gguf_writer.add_type(gguf.GGUFType.MODEL) - - def prepare_metadata(self, vocab_only: bool): - - total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count() - - self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params) - - # Fallback to model directory name if metadata name is still missing - if self.metadata.name is None: - self.metadata.name = self.dir_model.name - - # Generate parameter weight class (useful for leader boards) if not yet determined - if self.metadata.size_label is None and total_params > 0: - self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count) - - # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' - output_type: str = self.ftype.name.partition("_")[2] - - # Filename Output - if self.fname_out.is_dir(): - # Generate default filename based on model specification and available metadata - if not vocab_only: - fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None) - else: - fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab") - - # Use the default filename - self.fname_out = self.fname_out / f"{fname_default}.gguf" - else: - # Output path is a custom defined templated filename - # Note: `not is_dir()` is used because `.is_file()` will not detect - # file template strings as it doesn't actually exist as a file - - # Process templated file name with the output ftype, useful with the "auto" ftype - self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type) - - self.set_type() - - logger.info("Set meta model") - self.metadata.set_gguf_meta_model(self.gguf_writer) - - logger.info("Set model parameters") - self.set_gguf_parameters() - - logger.info("Set model tokenizer") - self.set_vocab() - - logger.info("Set model quantization version") - self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION) - - def write(self): - self.prepare_tensors() - self.prepare_metadata(vocab_only=False) - self.gguf_writer.write_header_to_file(path=self.fname_out) - self.gguf_writer.write_kv_data_to_file() - self.gguf_writer.write_tensors_to_file(progress=True) - self.gguf_writer.close() - - def write_vocab(self): - if len(self.gguf_writer.tensors) != 1: - raise 
ValueError('Splitting the vocabulary is not supported')
-
-        self.prepare_metadata(vocab_only=True)
-        self.gguf_writer.write_header_to_file(path=self.fname_out)
-        self.gguf_writer.write_kv_data_to_file()
-        self.gguf_writer.close()
-
-    @staticmethod
-    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
-        part_names: list[str] = []
-        for filename in os.listdir(dir_model):
-            if filename.startswith(prefix) and filename.endswith(suffix):
-                part_names.append(filename)
-
-        part_names.sort()
-
-        return part_names
-
-    @staticmethod
-    def load_hparams(dir_model: Path):
-        with open(dir_model / "config.json", "r", encoding="utf-8") as f:
-            return json.load(f)
-
-    @classmethod
-    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
-        assert names
-
-        def func(modelcls: AnyModel) -> AnyModel:
-            for name in names:
-                cls._model_classes[name] = modelcls
-            return modelcls
-        return func
-
-    @classmethod
-    def from_model_architecture(cls, arch: str) -> type[Model]:
-        try:
-            return cls._model_classes[arch]
-        except KeyError:
-            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
-
-    def does_token_look_special(self, token: str | bytes) -> bool:
-        if isinstance(token, (bytes, bytearray)):
-            token_text = token.decode(encoding="utf-8")
-        elif isinstance(token, memoryview):
-            token_text = token.tobytes().decode(encoding="utf-8")
-        else:
-            token_text = token
-
-        # Some models mark some added tokens which ought to be control tokens as not special.
-        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
-        seems_special = token_text in (
-            "<pad>",  # deepseek-coder
-            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
-        )
-
-        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
-        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder
-
-        # TODO: should these be marked as UNUSED instead? (maybe not)
-        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}
-
-        return seems_special
-
-    # used for GPT-2 BPE and WordPiece vocabs
-    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
-        tokens: list[str] = []
-        toktypes: list[int] = []
-
-        from transformers import AutoTokenizer
-        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
-        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
-        assert max(tokenizer.vocab.values()) < vocab_size
-
-        tokpre = self.get_vocab_base_pre(tokenizer)
-
-        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
-        added_vocab = tokenizer.get_added_vocab()
-
-        for i in range(vocab_size):
-            if i not in reverse_vocab:
-                tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.UNUSED)
-            else:
-                token: str = reverse_vocab[i]
-                if token in added_vocab:
-                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
-                        toktypes.append(gguf.TokenType.CONTROL)
-                    else:
-                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
-                        toktypes.append(gguf.TokenType.USER_DEFINED)
-                else:
-                    toktypes.append(gguf.TokenType.NORMAL)
-                tokens.append(token)
-
-        return tokens, toktypes, tokpre
-
-    # NOTE: this function is generated by convert_hf_to_gguf_update.py
-    # do not modify it manually!
- # ref: https://github.com/ggerganov/llama.cpp/pull/6920 - # Marker: Start get_vocab_base_pre - def get_vocab_base_pre(self, tokenizer) -> str: - # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that - # is specific for the BPE pre-tokenizer used by the model - # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer - - chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL' - - chktok = tokenizer.encode(chktxt) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.debug(f"chktok: {chktok}") - logger.debug(f"chkhsh: {chkhsh}") - - res = None - - # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script - # or pull the latest version of the model from Huggingface - # don't edit the hashes manually! - if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5": - # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B - res = "llama-bpe" - if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754": - # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base - res = "deepseek-llm" - if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821": - # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base - res = "deepseek-coder" - if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": - # ref: https://huggingface.co/tiiuae/falcon-7b - res = "falcon" - if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": - # ref: https://huggingface.co/BAAI/bge-small-en-v1.5 - res = "bert-bge" - if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166": - # ref: https://huggingface.co/mosaicml/mpt-7b - res = "mpt" - if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34": - # ref: https://huggingface.co/bigcode/starcoder2-3b - res = "starcoder" - if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454": - # ref: https://huggingface.co/openai-community/gpt2 - res = "gpt-2" - if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3": - # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b - res = "stablelm2" - if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff": - # ref: https://huggingface.co/smallcloudai/Refact-1_6-base - res = "refact" - if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8": - # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01 - res = "command-r" - if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea": - # ref: https://huggingface.co/Qwen/Qwen1.5-7B - res = "qwen2" - if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166": - # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf - res = "olmo" - if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e": - # ref: https://huggingface.co/databricks/dbrx-base - res = "dbrx" - if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": - # ref: 
https://huggingface.co/jinaai/jina-embeddings-v2-base-en - res = "jina-v2-en" - if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es - res = "jina-v2-es" - if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de - res = "jina-v2-de" - if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d": - # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct - res = "smaug-bpe" - if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360": - # ref: https://huggingface.co/LumiOpen/Poro-34B-chat - res = "poro-chat" - if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code - res = "jina-v2-code" - if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b": - # ref: https://huggingface.co/THUDM/glm-4-9b-chat - res = "chatglm-bpe" - if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee": - # ref: https://huggingface.co/LumiOpen/Viking-7B - res = "viking" - if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901": - # ref: https://huggingface.co/core42/jais-13b - res = "jais" - if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f": - # ref: https://huggingface.co/WisdomShell/CodeShell-7B - res = "codeshell" - if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e": - # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407 - res = "tekken" - if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249": - # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M - res = "smollm" - - if res is None: - logger.warning("\n") - logger.warning("**************************************************************************************") - logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!") - logger.warning("** There are 2 possible reasons for this:") - logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") - logger.warning("** - the pre-tokenization config has changed upstream") - logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") - logger.warning("**") - logger.warning(f"** chkhsh: {chkhsh}") - logger.warning("**************************************************************************************") - logger.warning("\n") - raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()") - - logger.debug(f"tokenizer.ggml.pre: {repr(res)}") - logger.debug(f"chkhsh: {chkhsh}") - - return res - # Marker: End get_vocab_base_pre - - def _set_vocab_gpt2(self) -> None: - tokens, toktypes, tokpre = self.get_vocab_base() - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_qwen(self): - dir_model = self.dir_model - hparams = self.hparams - tokens: list[str] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, 
trust_remote_code=True) - vocab_size = hparams["vocab_size"] - assert max(tokenizer.get_vocab().values()) < vocab_size - - tokpre = self.get_vocab_base_pre(tokenizer) - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[QwenModel.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) == 2 - merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged))) - - # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined - added_vocab = tokenizer.special_tokens - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.CONTROL) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges - # only add special tokens when they were not already loaded from config.json - if len(special_vocab.special_token_ids) == 0: - special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"]) - # this one is usually not in config.json anyway - special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_sentencepiece(self, add_to_gguf=True): - tokens, scores, toktypes = self._create_vocab_sentencepiece() - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def _create_vocab_sentencepiece(self): - from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype 
- - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, token_data in added_tokens_decoder.items(): - token_id = int(token_id) - token: str = token_data["content"] - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token.encode("utf-8"): - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}') - if token_data.get("special") or self.does_token_look_special(token): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - else: - token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - scores[token_id] = -1000.0 - tokens[token_id] = token.encode("utf-8") - - if vocab_size > len(tokens): - pad_count = vocab_size - len(tokens) - logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]") - for i in range(1, pad_count + 1): - tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.UNUSED) - - return tokens, scores, toktypes - - def _set_vocab_llama_hf(self): - vocab = gguf.LlamaHfVocab(self.dir_model) - tokens = [] - scores = [] - toktypes = [] - - for text, score, toktype in vocab.all_tokens(): - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - assert len(tokens) == vocab.vocab_size - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): - tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf" - logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'") - vocab_reader = gguf.GGUFReader(tokenizer_path, "r") - - default_pre = "mpt" if model_name == "gpt-neox" else "default" - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL) - assert field # tokenizer model - self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8")) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE) - self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST) - assert field # token list - self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size]) - - if model_name == "llama-spm": - field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES) - assert field # token scores - 
self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE) - assert field # token types - self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) - - if model_name != "llama-spm": - field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES) - assert field # token merges - self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data]) - - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None: - self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None: - self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None: - self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None: - self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None: - self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None: - self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0]) - - -@Model.register("GPTNeoXForCausalLM") -class GPTNeoXModel(Model): - model_arch = gguf.MODEL_ARCH.GPTNEOX - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count( - int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])), - ) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True)) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - - tensors: list[tuple[str, Tensor]] = [] - - if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed)) - data_torch = torch.cat( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.weight") - elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name): - qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) - data_torch = torch.cat( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, 
:].reshape((n_embed,)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.bias") - - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors - - -@Model.register("BloomForCausalLM") -class BloomModel(Model): - model_arch = gguf.MODEL_ARCH.BLOOM - - def set_gguf_parameters(self): - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) - self.gguf_writer.add_embedding_length(n_embed) - self.gguf_writer.add_feed_forward_length(4 * n_embed) - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - - name = re.sub(r'transformer\.', '', name) - - tensors: list[tuple[str, Tensor]] = [] - - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed)) - data_torch = torch.cat( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.weight") - elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): - qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) - data_torch = torch.cat( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.bias") - - tensors.append((self.map_tensor_name(name), data_torch)) - - if name == "word_embeddings.weight": - assert self.tensor_names is not None - - # TODO: tie them at runtime, don't duplicate in the model file - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("MPTForCausalLM") -class MPTModel(Model): - model_arch = gguf.MODEL_ARCH.MPT - - def set_vocab(self): - try: - self._set_vocab_gpt2() - except Exception: - # Fallback for SEA-LION model - self._set_vocab_sentencepiece() - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_pad_token_id(3) - self.gguf_writer.add_eos_token_id(1) - self.gguf_writer.add_unk_token_id(0) - - def set_gguf_parameters(self): - block_count = self.hparams["n_layers"] - self.gguf_writer.add_context_length(self.hparams["max_seq_len"]) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"]) - self.gguf_writer.add_head_count(self.hparams["n_heads"]) - if 
kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"): - self.gguf_writer.add_head_count_kv(kv_n_heads) - self.gguf_writer.add_layer_norm_eps(1e-5) - if self.hparams["attn_config"]["clip_qkv"] is not None: - self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"]) - if self.hparams["attn_config"]["alibi"]: - self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"]) - else: - self.gguf_writer.add_max_alibi_bias(0.0) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - if "scales" in name: - new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales")) - new_name = new_name.replace("scales", "act.scales") - else: - new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias")) - - return [(new_name, data_torch)] - - -@Model.register("OrionForCausalLM") -class OrionModel(Model): - model_arch = gguf.MODEL_ARCH.ORION - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - # note: config provides rms norm but it is actually layer norm - # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571 - self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"]) - - -@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM") -class BaichuanModel(Model): - model_arch = gguf.MODEL_ARCH.BAICHUAN - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // 
self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - tensors: list[tuple[str, Tensor]] = [] - - if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight": - logger.info(f"Unpacking and permuting layer {bid}") - tensors = [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), - self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), - self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), - self._reverse_hf_part(data_torch, 2)), - ] - else: - tensors = [(self.map_tensor_name(name), data_torch)] - - return tensors - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - def _reverse_hf_permute_part( - self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None, - ) -> Tensor: - r = weights.shape[0] // 3 - return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv) - - def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor: - r = weights.shape[0] // 3 - return weights[r * n_part:r * n_part + r, ...] - - -@Model.register("XverseForCausalLM") -class XverseModel(Model): - model_arch = gguf.MODEL_ARCH.XVERSE - - def set_vocab(self): - assert (self.dir_model / "tokenizer.json").is_file() - dir_model = self.dir_model - hparams = self.hparams - - tokens: list[bytes] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model) - vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) - # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size, - # because vocab_size is the count of items, and indexes start at 0. 
- max_vocab_index = max(tokenizer.get_vocab().values()) - if max_vocab_index >= vocab_size: - raise ValueError("Vocabulary size exceeds expected maximum size.") - - reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} - added_vocab = tokenizer.get_added_vocab() - - for token_id in range(vocab_size): - token_text = reverse_vocab[token_id].encode('utf-8') - # replace "\x00" to string with length > 0 - if token_text == b"\x00": - toktype = gguf.TokenType.BYTE # special - token_text = f"<{token_text}>".encode('utf-8') - elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text): - toktype = gguf.TokenType.BYTE # special - elif reverse_vocab[token_id] in added_vocab: - if tokenizer.added_tokens_decoder[token_id].special: - toktype = gguf.TokenType.CONTROL - else: - toktype = gguf.TokenType.USER_DEFINED - else: - toktype = gguf.TokenType.NORMAL - - tokens.append(token_text) - toktypes.append(toktype) - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith("q_proj.weight"): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) - if name.endswith("k_proj.weight"): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - - return [(self.map_tensor_name(name), data_torch)] - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - 
n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - -@Model.register("FalconForCausalLM", "RWForCausalLM") -class FalconModel(Model): - model_arch = gguf.MODEL_ARCH.FALCON - - def set_gguf_parameters(self): - block_count = self.hparams.get("num_hidden_layers") - if block_count is None: - block_count = self.hparams["n_layer"] # old name - - n_head = self.hparams.get("num_attention_heads") - if n_head is None: - n_head = self.hparams["n_head"] # old name - - n_head_kv = self.hparams.get("num_kv_heads") - if n_head_kv is None: - n_head_kv = self.hparams.get("n_head_kv", 1) # old name - - self.gguf_writer.add_context_length(2048) # not in config.json - self.gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # QKV tensor transform - # The original query_key_value tensor contains n_head_kv "kv groups", - # each consisting of n_head/n_head_kv query weights followed by one key - # and one value weight (shared by all query heads in the kv group). - # This layout makes it a big pain to work with in GGML. - # So we rearrange them here, so that we have n_head query weights - # followed by n_head_kv key weights followed by n_head_kv value weights, - # in contiguous fashion. - # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - - if "query_key_value" in name: - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1 - head_dim = self.hparams["hidden_size"] // n_head - - qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) - q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) - k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) - v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) - data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("GPTBigCodeForCausalLM") -class StarCoderModel(Model): - model_arch = gguf.MODEL_ARCH.STARCODER - - def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] - - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(1) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - -@Model.register("GPTRefactForCausalLM") -class RefactModel(Model): - model_arch = gguf.MODEL_ARCH.REFACT - - def set_vocab(self): - super().set_vocab() - - # TODO: how to determine special FIM tokens automatically?
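The Falcon query_key_value rearrangement deleted above is easier to see on toy shapes. A minimal, self-contained sketch (all sizes are hypothetical; only the view/reshape logic mirrors the code):

    import torch

    n_head, n_head_kv, head_dim = 4, 2, 3
    hidden = n_head * head_dim  # Falcon ties the hidden size to n_head * head_dim

    # fused weight: per kv group, n_head/n_head_kv query row-blocks, then one
    # key block and one value block (each block is head_dim rows)
    rows = (n_head + 2 * n_head_kv) * head_dim
    fused = torch.arange(rows * hidden, dtype=torch.float32).reshape(rows, hidden)

    qkv = fused.view(n_head_kv, n_head // n_head_kv + 2, head_dim, hidden)
    q = qkv[:, :-2].reshape(n_head * head_dim, hidden)      # all query heads
    k = qkv[:, [-2]].reshape(n_head_kv * head_dim, hidden)  # one key per group
    v = qkv[:, [-1]].reshape(n_head_kv * head_dim, hidden)  # one value per group

    rearranged = torch.cat((q, k, v))  # contiguous q | k | v, the layout ggml wants
    assert rearranged.shape == fused.shape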
- special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'eot']) - special_vocab._set_special_token("prefix", 1) - special_vocab._set_special_token("suffix", 3) - special_vocab._set_special_token("middle", 2) - special_vocab.chat_template = None # do not add it twice - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - hidden_dim = self.hparams["n_embd"] - inner_dim = 4 * hidden_dim - hidden_dim = int(2 * inner_dim / 3) - multiple_of = 256 - ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - - block_count = self.hparams["n_layer"] - - # refact uses Alibi. So this is from config.json which might be used by training. - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - - self.gguf_writer.add_feed_forward_length(ff_dim) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(1) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - hidden_dim = self.hparams["n_embd"] - inner_dim = 4 * hidden_dim - hidden_dim = int(2 * inner_dim / 3) - multiple_of = 256 - ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - n_head = self.hparams["n_head"] - n_head_kv = 1 - head_dim = self.hparams["n_embd"] // n_head - - tensors: list[tuple[str, Tensor]] = [] - - if bid is not None: - if name == f"transformer.h.{bid}.attn.kv.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:])) - elif name == f"transformer.h.{bid}.attn.q.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch)) - elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])) - - if len(tensors) == 0: - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors - - -@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM") -class StableLMModel(Model): - model_arch = gguf.MODEL_ARCH.STABLELM - - def set_vocab(self): - if (self.dir_model / "tokenizer.json").is_file(): - self._set_vocab_gpt2() - else: - # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab - self._set_vocab_qwen() - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"]) - self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"]))) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"]) - 
self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) - self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) - self.gguf_writer.add_file_type(self.ftype) - - _q_norms: list[dict[str, Tensor]] | None = None - _k_norms: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams["num_key_value_heads"] - - if name.find("q_layernorm.norms") != -1: - assert bid is not None - - if self._q_norms is None: - self._q_norms = [{} for _ in range(self.block_count)] - - self._q_norms[bid][name] = data_torch - - if len(self._q_norms[bid]) >= n_head: - return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") - else: - return [] - - if name.find("k_layernorm.norms") != -1: - assert bid is not None - - if self._k_norms is None: - self._k_norms = [{} for _ in range(self.block_count)] - - self._k_norms[bid][name] = data_torch - - if len(self._k_norms[bid]) >= n_kv_head: - return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): - datas: list[Tensor] = [] - # extract the norms in order - for xid in range(n_head): - ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" - datas.append(norms[ename]) - del norms[ename] - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" - new_name = self.map_tensor_name(merged_name) - - return [(new_name, data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._q_norms is not None or self._k_norms is not None: - # flatten two `list[dict[str, Tensor]]` into a single `list[str]` - norms = ( - [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else [] - ) + ( - [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else [] - ) - if len(norms) > 0: - raise ValueError(f"Unprocessed norms: {norms}") - - -@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") -class LlamaModel(Model): - model_arch = gguf.MODEL_ARCH.LLAMA - - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - try: - self._set_vocab_llama_hf() - except (FileNotFoundError, TypeError): - # Llama 3 - self._set_vocab_gpt2() - - # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256) - if self.hparams.get("vocab_size", 32000) == 32016: - special_vocab = gguf.SpecialVocab( - self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'eot'] - ) - special_vocab._set_special_token("prefix", 32007) - special_vocab._set_special_token("suffix", 32008) - special_vocab._set_special_token("middle", 32009) - special_vocab._set_special_token("eot", 32010) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: - rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] - self.gguf_writer.add_rope_dimension_count(rope_dim) - - if self.hparams.get("rope_scaling") is not None and 
"factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - if "add_prefix_space" in tokenizer_config_json: - self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"]) - - # Apply to granite small models only - if self.hparams.get("vocab_size", 32000) == 49152: - self.gguf_writer.add_add_bos_token(False) - - @staticmethod - def permute(weights: Tensor, n_head: int, n_head_kv: int | None): - if n_head_kv is not None and n_head != n_head_kv: - n_head = n_head_kv - return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape)) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if name.endswith(("q_proj.weight", "q_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight", "k_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["w1", "w2", "w3"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("BitnetForCausalLM") -class BitnetModel(Model): - model_arch = gguf.MODEL_ARCH.BITNET - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(1.0) - - def weight_quant(self, weight): - dtype = weight.dtype - weight = weight.float() - s = 1 / weight.abs().mean().clamp(min=1e-5) - weight = (weight * s).round().clamp(-1, 1) / s - scale = weight.abs().max().unsqueeze(0) - weight = torch.where(weight.abs().less(1e-6), 0, weight).type(dtype) - weight = torch.sign(weight).type(dtype) - return weight.type(dtype), scale.type(torch.float32) - - def modify_tensors(self, 
data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - new_name = self.map_tensor_name(name) - - if any(self.match_model_tensor_name(new_name, key, bid) for key in [ - gguf.MODEL_TENSOR.ATTN_Q, - gguf.MODEL_TENSOR.ATTN_K, - gguf.MODEL_TENSOR.ATTN_V, - gguf.MODEL_TENSOR.ATTN_OUT, - gguf.MODEL_TENSOR.FFN_UP, - gguf.MODEL_TENSOR.FFN_DOWN, - gguf.MODEL_TENSOR.FFN_GATE, - ]): - # transform weight into 1/0/-1 (in fp32) - weight_torch, scale_torch = self.weight_quant(data_torch) - yield (new_name, weight_torch) - yield (new_name.removesuffix(".weight") + ".scale", scale_torch) - else: - yield (new_name, data_torch) - - -@Model.register("GrokForCausalLM") -class GrokModel(Model): - model_arch = gguf.MODEL_ARCH.GROK - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find(".moe.") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["linear", "linear_1", "linear_v"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("DbrxForCausalLM") -class DbrxModel(Model): - model_arch = gguf.MODEL_ARCH.DBRX - - def set_gguf_parameters(self): - ffn_config = self.hparams["ffn_config"] - attn_config = self.hparams["attn_config"] - self.gguf_writer.add_block_count(self.hparams["n_layers"]) - - self.gguf_writer.add_context_length(self.hparams["max_seq_len"]) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"]) - - self.gguf_writer.add_head_count(self.hparams["n_heads"]) - self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"]) - - self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"]) - - self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"]) - - self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"]) - self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"]) - - self.gguf_writer.add_layer_norm_eps(1e-5) - - self.gguf_writer.add_file_type(self.ftype) - logger.info(f"gguf: file type = {self.ftype}") - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_expert = self.hparams["ffn_config"]["moe_num_experts"] - n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] - n_embd = self.hparams["d_model"] - - # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose - # original implementation expects (n_expert, n_ff, n_embd) for all experts weights - # But llama.cpp 
moe graph works differently - # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions - # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor - exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} - "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - experts = False - - for exp_tensor_name in exp_tensor_names.keys(): - if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: - experts = True - data_torch = data_torch.view(n_expert, n_ff, n_embd) - if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None: - data_torch = data_torch.permute(*permute_tensor) - break - - # map tensor names - # In MoE models the ffn tensors are typically most of the model weights, - # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. - # Every other model has the weight names ending in .weight, - # let's assume that is the convention which is not the case for dbrx: - # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 - new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - - return [(new_name, data_torch)] - - def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid # unused - - return n_dims > 1 - - -@Model.register("MiniCPMForCausalLM") -class MiniCPMModel(Model): - model_arch = gguf.MODEL_ARCH.MINICPM - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - def set_vocab(self): - self._set_vocab_llama_hf() - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("QWenLMHeadModel") -class QwenModel(Model): - model_arch = gguf.MODEL_ARCH.QWEN - - @staticmethod - def token_bytes_to_string(b): - from 
transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode - byte_encoder = bytes_to_unicode() - return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) - - @staticmethod - def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]: - parts = [bytes([b]) for b in token] - while True: - min_idx = None - min_rank = None - for i, pair in enumerate(zip(parts[:-1], parts[1:])): - rank = mergeable_ranks.get(pair[0] + pair[1]) - if rank is not None and (min_rank is None or rank < min_rank): - min_idx = i - min_rank = rank - if min_rank is None or (max_rank is not None and min_rank >= max_rank): - break - assert min_idx is not None - parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:] - return parts - - def set_vocab(self): - self._set_vocab_qwen() - - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - -@Model.register("Qwen2ForCausalLM") -class Qwen2Model(Model): - model_arch = gguf.MODEL_ARCH.QWEN2 - - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - self._set_vocab_gpt2() - - -@Model.register("Qwen2MoeForCausalLM") -class Qwen2MoeModel(Model): - model_arch = gguf.MODEL_ARCH.QWEN2MOE - - def set_gguf_parameters(self): - super().set_gguf_parameters() - if (n_experts := self.hparams.get("num_experts")) is not None: - self.gguf_writer.add_expert_count(n_experts) - if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None: - self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size) - logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}") - if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None: - self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size) - logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}") - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find("experts") != -1: - n_experts = self.hparams["num_experts"] - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for w_name in ["down_proj", "gate_proj", "up_proj"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = 
self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("GPT2LMHeadModel") -class GPT2Model(Model): - model_arch = gguf.MODEL_ARCH.GPT2 - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_context_length(self.hparams["n_ctx"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - tensors: list[tuple[str, Tensor]] = [] - - # we don't need these - if name.endswith((".attn.bias", ".attn.masked_bias")): - return tensors - - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): - data_torch = data_torch.transpose(1, 0) - - new_name = self.map_tensor_name(name) - - tensors.append((new_name, data_torch)) - - # note: GPT2 output is tied to (same as) wte in original model - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("PhiForCausalLM") -class Phi2Model(Model): - model_arch = gguf.MODEL_ARCH.PHI2 - - def set_gguf_parameters(self): - block_count = self.find_hparam(["num_hidden_layers", "n_layer"]) - - rot_pct = self.find_hparam(["partial_rotary_factor"]) - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - - self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"])) - - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(4 * n_embd) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head) - self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"])) - self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_add_bos_token(False) - - -@Model.register("Phi3ForCausalLM") -class Phi3MiniModel(Model): - model_arch = gguf.MODEL_ARCH.PHI3 - - def set_vocab(self): - from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - raise ValueError(f'Error: Missing {tokenizer_path}') - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - 
score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, foken_data in added_tokens_decoder.items(): - token_id = int(token_id) - token = foken_data["content"].encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - tokenizer_file = self.dir_model / 'tokenizer.json' - if tokenizer_file.is_file(): - with open(tokenizer_file, "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) - added_tokens = tokenizer_json.get("added_tokens", []) - for foken_data in added_tokens: - token_id = int(foken_data["id"]) - token = foken_data["content"].encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - block_count = self.find_hparam(["num_hidden_layers", "n_layer"]) - - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"]) - rms_eps = self.find_hparam(["rms_norm_eps"]) - max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"]) - orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"]) - rope_dims = n_embd // n_head - - self.gguf_writer.add_context_length(max_pos_embds) - 
self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds) - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"])) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_rms_eps(rms_eps) - self.gguf_writer.add_rope_dimension_count(rope_dims) - self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"])) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"])) - - # write rope scaling for long context (128k) model - rope_scaling = self.find_hparam(['rope_scaling'], True) - if rope_scaling is None: - return - - scale = max_pos_embds / orig_max_pos_embds - - rope_scaling_type = rope_scaling.get('type', '').lower() - if len(rope_scaling_type) == 0: - raise KeyError('Missing the required key rope_scaling.type') - - if rope_scaling_type == 'su' or rope_scaling_type == 'longrope': - attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0 - elif rope_scaling_type == 'yarn': - attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0 - else: - raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet') - - self.gguf_writer.add_rope_scaling_attn_factors(attn_factor) - - long_factors = rope_scaling.get('long_factor', None) - short_factors = rope_scaling.get('short_factor', None) - - if long_factors is None or short_factors is None: - raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor') - - if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2: - raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}') - - self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_LONG] + ".weight", np.array(long_factors, dtype=np.float32)) - self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT] + ".weight", np.array(short_factors, dtype=np.float32)) - - -@Model.register("PlamoForCausalLM") -class PlamoModel(Model): - model_arch = gguf.MODEL_ARCH.PLAMO - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(4096) # not in config.json - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong - self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - def shuffle_attn_q_weight(self, data_torch): - assert data_torch.size() == (5120, 5120) - data_torch = data_torch.reshape(8, 5, 128, 5120) - data_torch = torch.permute(data_torch, (1, 0, 2, 3)) - data_torch = torch.reshape(data_torch, (5120, 5120)) - return data_torch - - def shuffle_attn_output_weight(self, data_torch): - assert data_torch.size() == (5120, 5120) - data_torch = data_torch.reshape(5120, 8, 5, 128) - data_torch = torch.permute(data_torch, (0, 2, 1, 3)) - data_torch = torch.reshape(data_torch, (5120, 5120)) - return data_torch - - def modify_tensors(self, data_torch: Tensor, name: str, bid:
int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - new_name = self.map_tensor_name(name) - - # shuffle for broadcasting of gqa in ggml_mul_mat - if new_name.endswith("attn_q.weight"): - data_torch = self.shuffle_attn_q_weight(data_torch) - elif new_name.endswith("attn_output.weight"): - data_torch = self.shuffle_attn_output_weight(data_torch) - - return [(new_name, data_torch)] - - -@Model.register("CodeShellForCausalLM") -class CodeShellModel(Model): - model_arch = gguf.MODEL_ARCH.CODESHELL - - def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] - - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_freq_base(10000.0) - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(1.0) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - new_name = self.map_tensor_name(name) - - tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)] - - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - assert self.tensor_names is not None - - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - # copy tok_embd.weight to output.weight - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("InternLM2ForCausalLM") -class InternLM2Model(Model): - model_arch = gguf.MODEL_ARCH.INTERNLM2 - - def set_vocab(self): - # (TODO): Is there a better way? - # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character - # \x00 specially and convert it into an emoji character to prevent it from being mistakenly - # recognized as an empty string in C++. - from sentencepiece import SentencePieceProcessor - from sentencepiece import sentencepiece_model_pb2 as model - - tokenizer_path = self.dir_model / 'tokenizer.model' - - tokens: list[bytes] = [] - scores: list[float] = [] - toktypes: list[int] = [] - - if not tokenizer_path.is_file(): - logger.error(f'Error: Missing {tokenizer_path}') - sys.exit(1) - - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - for token_id in range(vocab_size): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - if text == b"\x00": - # (TODO): fixme - # Hack here and replace the \x00 characters. 
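A minimal sketch of why the replacement below is needed, using a hypothetical token: a bare NUL byte would read back as an empty string on the C++ side, so it is swapped for a visible character.

    # toy illustration of the hack that follows (values are hypothetical)
    piece = b"\x00"  # a token whose text is a lone NUL byte
    text = "🐉".encode("utf-8") if piece == b"\x00" else piece
    assert text != b"\x00" and len(text) > 0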
- logger.warning(f"InternLM2 convert token '{text}' to '🐉'!") - text = "🐉".encode("utf-8") - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - # take care of ununsed raw token - if piece.startswith('[UNUSED'): - toktype = SentencePieceTokenTypes.UNUSED - - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - - for key in added_tokens_json: - tokens.append(key.encode("utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.USER_DEFINED) - - chat_eos_token = '<|im_end|>' - chat_eos_token_id = None - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, foken_data in added_tokens_decoder.items(): - token_id = int(token_id) - token = foken_data["content"] - if token == chat_eos_token: - chat_eos_token_id = token_id - token = token.encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - tokenizer_file = self.dir_model / 'tokenizer.json' - if tokenizer_file.is_file(): - with open(tokenizer_file, "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) - added_tokens = tokenizer_json.get("added_tokens", []) - for foken_data in added_tokens: - token_id = int(foken_data["id"]) - token = foken_data["content"] - if token == chat_eos_token: - chat_eos_token_id = token_id - token = token.encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - self.gguf_writer.add_add_space_prefix(add_prefix) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - old_eos = special_vocab.special_token_ids["eos"] - if chat_eos_token_id is not None: - # For the chat model, we replace the eos with '<|im_end|>'. 
- # TODO: this is a hack, should be fixed - https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048 - special_vocab.special_token_ids["eos"] = chat_eos_token_id - logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}" - " in chat mode so that the conversation can end normally.") - - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_file_type(self.ftype) - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - num_heads = self.hparams["num_attention_heads"] - num_kv_heads = self.hparams["num_key_value_heads"] - n_embd = self.hparams["hidden_size"] - q_per_kv = num_heads // num_kv_heads - head_dim = n_embd // num_heads - num_groups = num_heads // q_per_kv - - if bid is not None and f"model.layers.{bid}.attention.wqkv" in name: - qkv = data_torch - - qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd)) - q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1] - - # The model weights of q and k require an additional reshape.
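The grouping above is easier to follow with concrete toy sizes. A self-contained sketch (dimensions are hypothetical; the extra permute the comment mentions is applied by LlamaModel.permute in the code that resumes below):

    import torch

    num_heads, num_kv_heads, head_dim = 8, 2, 4
    n_embd = num_heads * head_dim
    q_per_kv = num_heads // num_kv_heads
    num_groups = num_heads // q_per_kv          # == num_kv_heads

    wqkv = torch.randn((num_heads + 2 * num_kv_heads) * head_dim, n_embd)
    qkv = wqkv.reshape(num_groups, q_per_kv + 2, head_dim, n_embd)
    q, k, v = qkv[:, :q_per_kv], qkv[:, -2], qkv[:, -1]

    # flattened back to 2-D, each output has the expected number of rows;
    # q and k still need the rotary un-interleaving before they match ggml
    assert q.reshape(-1, n_embd).shape[0] == num_heads * head_dim
    assert k.reshape(-1, n_embd).shape[0] == num_kv_heads * head_dim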
- q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads) - k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads) - v = v.reshape((-1, v.shape[-1])) - - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v), - ] - else: - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("BertModel", "CamembertModel") -class BertModel(Model): - model_arch = gguf.MODEL_ARCH.BERT - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.vocab_size = None - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_causal_attention(False) - - # get pooling path - pooling_path = None - module_path = self.dir_model / "modules.json" - if module_path.is_file(): - with open(module_path, encoding="utf-8") as f: - modules = json.load(f) - for mod in modules: - if mod["type"] == "sentence_transformers.models.Pooling": - pooling_path = mod["path"] - break - - # get pooling type - if pooling_path is not None: - with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f: - pooling = json.load(f) - if pooling["pooling_mode_mean_tokens"]: - pooling_type = gguf.PoolingType.MEAN - elif pooling["pooling_mode_cls_token"]: - pooling_type = gguf.PoolingType.CLS - else: - raise NotImplementedError("Only MEAN and CLS pooling types supported") - self.gguf_writer.add_pooling_type(pooling_type) - - def set_vocab(self): - tokens, toktypes, tokpre = self.get_vocab_base() - self.vocab_size = len(tokens) - - # we need this to validate the size of the token_type embeddings - # though currently we are passing all zeros to the token_type embeddings - self.gguf_writer.add_token_type_count(2) # "Sequence A" or "Sequence B" - - # convert to phantom space vocab - def phantom(tok): - if tok.startswith("[") and tok.endswith("]"): - return tok - if tok.startswith("##"): - return tok[2:] - return "\u2581" + tok - tokens = list(map(phantom, tokens)) - - # add vocab to gguf - self.gguf_writer.add_tokenizer_model("bert") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - # handle special tokens - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # we are only using BERT for embeddings so we don't need the pooling layer - if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): - return [] # we don't need these - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("NomicBertModel") -class NomicBertModel(BertModel): - model_arch = gguf.MODEL_ARCH.NOMIC_BERT - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # the HF config claims n_ctx=8192, but it uses RoPE scaling - self.hparams["n_ctx"] = 2048 - - # SwiGLU activation - assert self.hparams["activation_function"] == "swiglu" - # this doesn't do anything in the HF version - assert self.hparams["causal"] is False - # no bias tensors - assert self.hparams["qkv_proj_bias"] is False - assert self.hparams["mlp_fc1_bias"] is False - assert self.hparams["mlp_fc2_bias"] is False - # norm at end of layer - assert self.hparams["prenorm"] is False - # standard RoPE - assert
self.hparams["rotary_emb_fraction"] == 1.0 - assert self.hparams["rotary_emb_interleaved"] is False - assert self.hparams["rotary_emb_scale_base"] is None - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) - - -@Model.register("GemmaForCausalLM") -class GemmaModel(Model): - model_arch = gguf.MODEL_ARCH.GEMMA - - def set_vocab(self): - self._set_vocab_sentencepiece() - - # TODO: these special tokens should be exported only for the CodeGemma family - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot']) - special_vocab._set_special_token("prefix", 67) - special_vocab._set_special_token("suffix", 69) - special_vocab._set_special_token("middle", 68) - special_vocab._set_special_token("fsep", 70) - special_vocab._set_special_token("eot", 107) - special_vocab.chat_template = None # do not add it twice - special_vocab.add_to_gguf(self.gguf_writer) - - self.gguf_writer.add_add_space_prefix(False) - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_key_length(hparams["head_dim"]) - self.gguf_writer.add_value_length(hparams["head_dim"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading lm_head.weight. 
- if name == "lm_head.weight": - logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("Gemma2ForCausalLM") -class Gemma2Model(Model): - model_arch = gguf.MODEL_ARCH.GEMMA2 - - def set_vocab(self): - self._set_vocab_sentencepiece() - - self.gguf_writer.add_add_space_prefix(False) - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_key_length(hparams["head_dim"]) - self.gguf_writer.add_value_length(hparams["head_dim"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_attn_logit_softcapping( - self.hparams["attn_logit_softcapping"] - ) - self.gguf_writer.add_final_logit_softcapping( - self.hparams["final_logit_softcapping"] - ) - self.gguf_writer.add_sliding_window(self.hparams["sliding_window"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading lm_head.weight. 
- if name == "lm_head.weight": - logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("Starcoder2ForCausalLM") -class StarCoder2Model(Model): - model_arch = gguf.MODEL_ARCH.STARCODER2 - - -@Model.register("MambaForCausalLM", "MambaLMHeadModel") -class MambaModel(Model): - model_arch = gguf.MODEL_ARCH.MAMBA - - def set_vocab(self): - vocab_size = self.hparams["vocab_size"] - # Round vocab size to next multiple of 8 - pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8) - # pad using ceiling division - # ref: https://stackoverflow.com/a/17511341/22827863 - vocab_size = -(vocab_size // -pad_vocab) * pad_vocab - self.hparams["vocab_size"] = vocab_size - - if (self.dir_model / "tokenizer.json").is_file(): - self._set_vocab_gpt2() - elif (self.dir_model / "tokenizer.model").is_file(): - self._set_vocab_sentencepiece() - else: - # Use the GPT-NeoX tokenizer when no tokenizer files are present - self._set_vocab_builtin("gpt-neox", vocab_size) - - def set_gguf_parameters(self): - d_model = self.find_hparam(["hidden_size", "d_model"]) - d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4 - d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model - d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16 - # ceiling division - # ref: https://stackoverflow.com/a/17511341/22827863 - # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58 - dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16) - rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5 - - # Fail early for models which don't have a block expansion factor of 2 - assert d_inner == 2 * d_model - - self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default - self.gguf_writer.add_embedding_length(d_model) - self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading - self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_ssm_conv_kernel(d_conv) - self.gguf_writer.add_ssm_inner_size(d_inner) - self.gguf_writer.add_ssm_state_size(d_state) - self.gguf_writer.add_ssm_time_step_rank(dt_rank) - self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) - self.gguf_writer.add_file_type(self.ftype) - - _tok_embd = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) - tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - - new_name = self.map_tensor_name(name) - - if name.endswith(".A_log"): - logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) - - # assuming token_embd.weight is seen before output.weight - if self._tok_embd is not None and new_name == output_name: - if torch.equal(self._tok_embd, data_torch): - logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting") - return [] - elif new_name == 
tok_embd_name: - self._tok_embd = data_torch - - return [(new_name, data_torch)] - - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del n_dims # unused - - return bid is not None and new_name in ( - self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ - gguf.MODEL_TENSOR.SSM_CONV1D, - gguf.MODEL_TENSOR.SSM_X, - gguf.MODEL_TENSOR.SSM_DT, - gguf.MODEL_TENSOR.SSM_A, - gguf.MODEL_TENSOR.SSM_D, - ] - ) - - -@Model.register("CohereForCausalLM") -class CommandR2Model(Model): - model_arch = gguf.MODEL_ARCH.COMMAND_R - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # max_position_embeddings = 8192 in config.json but model was actually - # trained on 128k context length - # aya-23 models don't have model_max_length specified - self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"]) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_logit_scale(self.hparams["logit_scale"]) - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) - - -@Model.register("OlmoForCausalLM") -@Model.register("OLMoForCausalLM") -class OlmoModel(Model): - model_arch = gguf.MODEL_ARCH.OLMO - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_layer_norm_eps(1e-5) - clip_qkv = self.hparams.get("clip_qkv") - if clip_qkv is not None: - self.gguf_writer.add_clamp_kqv(clip_qkv) - - # Same as super class, but permuting q_proj, k_proj - # Copied from: LlamaModel - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("JinaBertModel", "JinaBertForMaskedLM") -class JinaBertV2Model(BertModel): - model_arch = gguf.MODEL_ARCH.JINA_BERT_V2 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.intermediate_size = self.hparams["intermediate_size"] - - def get_tensors(self): - for name, data in super().get_tensors(): - if 'gated_layer' in name: - d1 = data[:self.intermediate_size, :] - name1 = name.replace('gated_layers', 'gated_layers_w') - name1 = name1.replace('up_gated_layer', 'gated_layers_v') - d2 = data[self.intermediate_size:, :] - name2 = name.replace('gated_layers', 'gated_layers_v') - name2 = name2.replace('up_gated_layer', 'gated_layers_w') - yield name1, d1 - yield name2, d2 - continue - - yield name, data - - def set_vocab(self): - tokenizer_class = 'BertTokenizer' - with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f: - tokenizer_class = json.load(f)['tokenizer_class'] - - if tokenizer_class == 'BertTokenizer': - super().set_vocab() - elif tokenizer_class == 'RobertaTokenizer': - self._set_vocab_gpt2() - self.gguf_writer.add_token_type_count(2) - else: - raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel') - self.gguf_writer.add_add_bos_token(True) - self.gguf_writer.add_add_eos_token(True) - - -@Model.register("OpenELMForCausalLM") -class OpenELMModel(Model): - model_arch = gguf.MODEL_ARCH.OPENELM - - @staticmethod - def 
_make_divisible(v: float | int, divisor: int) -> int: - # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38 - new_v = max(divisor, int(v + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than 10%. - if new_v < 0.9 * v: - new_v += divisor - return new_v - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - ffn_multipliers: list[float] = self.hparams["ffn_multipliers"] - ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"] - self._n_embd: int = self.hparams["model_dim"] - self._num_kv_heads: list[int] = self.hparams["num_kv_heads"] - self._num_query_heads: list[int] = self.hparams["num_query_heads"] - self._ffn_dims: list[int] = [ - OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor) - for multiplier in ffn_multipliers - ] - assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int) - assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int) - - # Uses the tokenizer from meta-llama/Llama-2-7b-hf - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"]) - - def set_gguf_parameters(self): - n_embd = self._n_embd - head_dim = self.hparams["head_dim"] - rot_pct = 1.0 - assert self.block_count == len(self._num_kv_heads) - assert self.block_count == len(self._num_query_heads) - assert self.block_count == len(self._ffn_dims) - - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_context_length(self.hparams["max_context_length"]) - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(self._ffn_dims) - self.gguf_writer.add_head_count(self._num_query_heads) - self.gguf_writer.add_head_count_kv(self._num_kv_heads) - self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"]) - # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30 - self.gguf_writer.add_layer_norm_rms_eps(1e-6) - self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim)) - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) - self.gguf_writer.add_file_type(self.ftype) - - def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: - if "n_layers" in keys: - return self.hparams["num_transformer_layers"] - - return super().find_hparam(keys, optional) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - - # split ff - if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight": - ff_dim = self._ffn_dims[bid] - yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]) - yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]) - return - - yield (self.map_tensor_name(name), data_torch) - - -@Model.register("ArcticForCausalLM") -class ArcticModel(Model): - model_arch = gguf.MODEL_ARCH.ARCTIC - - def set_vocab(self): - # The reason for using a custom implementation here is that the - # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from - # tokenizer.model and used them as BOS and EOS instead of adding new tokens. 
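# A quick standalone sketch (not part of the deleted file) of the two rounding helpers used above: Mamba pads the vocab to a multiple of pad_vocab with the ceiling-division trick -(v // -m), and OpenELM's _make_divisible rounds FFN widths to the divisor while never rounding down by more than 10%. The sample numbers are illustrative only.
def ceil_to_multiple(v: int, m: int) -> int:
    return -(v // -m) * m                        # ceiling division, then scale back up

def make_divisible(v: float, divisor: int) -> int:
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:                          # guard: never round down by more than 10%
        new_v += divisor
    return new_v

assert ceil_to_multiple(50277, 8) == 50280       # Mamba-style vocab padding
assert make_divisible(1363.2, 256) == 1280       # OpenELM-style FFN width rounding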
- from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - logger.error(f'Error: Missing {tokenizer_path}') - sys.exit(1) - - # Read the whole vocabulary from the tokenizer.model file - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - # Use the added_tokens_decoder field from tokeniser_config.json as the source - # of information about added/redefined tokens and modify them accordingly. - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - - if "added_tokens_decoder" in tokenizer_config_json: - added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"] - for token_id, token_json in added_tokens_decoder.items(): - token_id = int(token_id) - if token_id >= vocab_size: - logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - token_content = token_json["content"] - token_type = SentencePieceTokenTypes.USER_DEFINED - token_score = -10000.0 - - # Map unk_token to UNKNOWN, other special tokens to CONTROL - # Set the score to 0.0 as in the original tokenizer.model - if ("special" in token_json) and token_json["special"]: - if token_content == tokenizer_config_json["unk_token"]: - token_type = SentencePieceTokenTypes.UNKNOWN - else: - token_type = SentencePieceTokenTypes.CONTROL - token_score = 0.0 - - logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})") - tokens[token_id] = token_content.encode("utf-8") - toktypes[token_id] = token_type - scores[token_id] = token_score - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"]) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if 
name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["w1", "w2", "w3"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("DeepseekV2ForCausalLM") -class DeepseekV2Model(Model): - model_arch = gguf.MODEL_ARCH.DEEPSEEK2 - - def set_vocab(self): - self._set_vocab_gpt2() - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - - self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None: - self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"]) - self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"]) - self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"]) - self.gguf_writer.add_value_length(hparams["v_head_dim"]) - self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) - self.gguf_writer.add_expert_count(hparams["n_routed_experts"]) - self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"]) - self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"]) - self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"]) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "yarn": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"]) - self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"]) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find("mlp.experts") != -1: - n_experts = self.hparams["n_routed_experts"] - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= 
n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for w_name in ["down_proj", "gate_proj", "up_proj"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("T5WithLMHeadModel") -@Model.register("T5ForConditionalGeneration") -@Model.register("MT5ForConditionalGeneration") -@Model.register("UMT5ForConditionalGeneration") -class T5Model(Model): - model_arch = gguf.MODEL_ARCH.T5 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.shared_token_embeddings_found = False - - def set_vocab(self): - # to avoid TypeError: Descriptors cannot be created directly - # exception when importing sentencepiece_model_pb2 - os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" - from sentencepiece import SentencePieceProcessor - from sentencepiece import sentencepiece_model_pb2 as model - - tokenizer_path = self.dir_model / 'tokenizer.model' - - # many older models use spiece.model tokenizer model filename - if not tokenizer_path.is_file(): - tokenizer_path = self.dir_model / 'spiece.model' - - if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") - - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - - # some models like Pile-T5 family use BPE tokenizer instead of Unigram - if sentencepiece_model.trainer_spec.model_type == 2: # BPE - # assure the tokenizer model file name is correct - assert tokenizer_path.name == 'tokenizer.model' - return self._set_vocab_sentencepiece() - else: - assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM - - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces - precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = 
SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - if vocab_size > len(tokens): - pad_count = vocab_size - len(tokens) - logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]") - for i in range(1, pad_count + 1): - tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.UNUSED) - - self.gguf_writer.add_tokenizer_model("t5") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - self.gguf_writer.add_add_space_prefix(add_prefix) - self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces) - if precompiled_charsmap: - self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_add_eos_token(True) - - def set_gguf_parameters(self): - if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None: - logger.warning("Couldn't find context length in config.json, assuming default value of 512") - n_ctx = 512 - self.gguf_writer.add_context_length(n_ctx) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"]) - self.gguf_writer.add_block_count(self.hparams["num_layers"]) - self.gguf_writer.add_head_count(self.hparams["num_heads"]) - self.gguf_writer.add_key_length(self.hparams["d_kv"]) - self.gguf_writer.add_value_length(self.hparams["d_kv"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight", - # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored - # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder - # and decoder and ignore the remaining ones. 
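# (In other words: whichever of the three names is encountered first wins, is renamed to "shared.weight", and any later duplicate is skipped, so the GGUF file ends up with exactly one copy of the shared embedding matrix.)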
- if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]: - if not self.shared_token_embeddings_found: - name = "shared.weight" - self.shared_token_embeddings_found = True - else: - logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.") - return [] - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("JAISLMHeadModel") -class JaisModel(Model): - model_arch = gguf.MODEL_ARCH.JAIS - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # SwigLU activation - assert self.hparams["activation_function"] == "swiglu" - # ALiBi position embedding - assert self.hparams["position_embedding_type"] == "alibi" - - # Embeddings scale - self.embeddings_scale = 1.0 - # note: For some JAIS flavors, output is tied to (same as) wte in original model - self.output_is_wte = False - if 'mup_embeddings_scale' in self.hparams: - self.output_is_wte = True # Hack (?) - self.embeddings_scale = self.hparams['mup_embeddings_scale'] - elif 'embeddings_scale' in self.hparams: - self.embeddings_scale = self.hparams['embeddings_scale'] - else: - assert False - - self.width_scale = 1.0 - if 'mup_output_alpha' in self.hparams: - assert 'mup_width_scale' in self.hparams - self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale'] - elif 'width_scale' in self.hparams: - self.width_scale = self.hparams['width_scale'] - else: - assert False - - self.max_alibi_bias = 8.0 - - def set_vocab(self): - self._set_vocab_gpt2() - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"]) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - tensors: list[tuple[str, Tensor]] = [] - - # we don't need these - if name.endswith((".attn.bias")): - return tensors - - if name.endswith(("relative_pe.slopes")): - # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation) - # Some other models has max_alibi_bias spelled out explicitly in the hyperparams, - # but Jais's PyTorch model simply precalculates the slope values and places them - # in relative_pes.slopes - n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"])) - first_val = float(data_torch[0].item()) - self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2) - - return tensors - - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")): - data_torch = data_torch.transpose(1, 0) - - new_name = self.map_tensor_name(name) - - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - tensors.append((new_name, data_torch * self.embeddings_scale)) - if self.output_is_wte: - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch * self.width_scale)) - elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT): - assert not self.output_is_wte - tensors.append((new_name, data_torch * self.width_scale)) - else: - tensors.append((new_name, data_torch)) - - return tensors - - def prepare_tensors(self): - super().prepare_tensors() - 
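# A numeric sanity check (an illustrative sketch, not from the original file) of the slope inversion in modify_tensors above: canonical ALiBi with a power-of-two head count n uses slopes 2**(-8*i/n), so the first precomputed slope alone recovers the max bias of 8.0.
import math
n_head_closest_log2 = 2 ** math.floor(math.log2(8))   # 8 heads -> 8
first_val = 2.0 ** (-8.0 / n_head_closest_log2)        # first slope, 0.5
assert -round(math.log2(first_val) * n_head_closest_log2) == 8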
self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias) - - -@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration") -class ChatGLMModel(Model): - model_arch = gguf.MODEL_ARCH.CHATGLM - - def set_vocab_chatglm3(self): - dir_model = self.dir_model - hparams = self.hparams - tokens: list[bytes] = [] - toktypes: list[int] = [] - scores: list[float] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab())) - assert max(tokenizer.get_vocab().values()) < vocab_size - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - for token_id in range(vocab_size): - piece = tokenizer._convert_id_to_token(token_id) - if token_id == 0: - piece = "<unk>" - elif token_id == 1: - piece = "<bos>" - elif token_id == 2: - piece = "<eos>" - - text = piece.encode("utf-8") - score = 0.0 - # Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py), - # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size() - if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size(): - score = tokenizer.tokenizer.sp_model.get_score(token_id) - - if token_id >= tokenizer.tokenizer.sp_model.vocab_size(): - if piece in special_tokens: - toktype = SentencePieceTokenTypes.CONTROL - elif len(piece) == 0: - text = f"[PAD{token_id}]".encode("utf-8") - toktype = SentencePieceTokenTypes.UNUSED - else: - toktype = SentencePieceTokenTypes.USER_DEFINED - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - continue - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.tokenizer.sp_model.is_unknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.tokenizer.sp_model.is_control(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.tokenizer.sp_model.is_unused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.tokenizer.sp_model.is_byte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - self.gguf_writer.add_tokenizer_model("llama") - # glm3 needs prefix and suffix formatted as: - # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>" - self.gguf_writer.add_tokenizer_pre("chatglm-spm") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - @staticmethod - def token_bytes_to_string(b): - from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode - byte_encoder = bytes_to_unicode() - return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) - - @staticmethod - def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]: - parts = [bytes([b]) for b in token] - while True: - min_idx = None - min_rank = None - for i, pair in enumerate(zip(parts[:-1], parts[1:])): - rank = mergeable_ranks.get(pair[0] + pair[1]) - if rank is not None and (min_rank is None or rank < min_rank): - min_idx = i - min_rank = rank - if min_rank is None or (max_rank is not None and min_rank >= max_rank): - break - assert min_idx is not None - parts = parts[:min_idx] + [parts[min_idx] + 
parts[min_idx + 1]] + parts[min_idx + 2:] - return parts - - def set_vocab(self): - if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""): - self.set_vocab_chatglm3() - return - - dir_model = self.dir_model - hparams = self.hparams - tokens: list[str] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams["padded_vocab_size"] - assert max(tokenizer.get_vocab().values()) < vocab_size - - tokpre = self.get_vocab_base_pre(tokenizer) - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[ChatGLMModel.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) >= 2 and len(merged) <= 7 - merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged))) - - # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined - added_vocab = tokenizer.get_added_vocab() - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges - # only add special tokens when they were not already loaded from config.json - special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"]) - special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) - # this one is usually not in config.json anyway - special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"]) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_head_kv = self.hparams.get("multi_query_group_num", n_head) - self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) - self.gguf_writer.add_embedding_length(n_embed) - self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed)) - self.gguf_writer.add_block_count(self.hparams["num_layers"]) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_dimension_count(64) - self.gguf_writer.add_add_bos_token(False) - rope_freq = 10000 - if "rope_ratio" in self.hparams: - rope_freq = rope_freq * self.hparams["rope_ratio"] - self.gguf_writer.add_rope_freq_base(rope_freq) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - if name.endswith(".rotary_pos_emb.inv_freq"): - return [] - - name = 
name.removeprefix("transformer.") - return [(self.map_tensor_name(name), data_torch)] - -###### CONVERSION LOGIC ###### - - -# tree of lazy tensors -class LazyTorchTensor(gguf.LazyBase): - _tensor_type = torch.Tensor - # to keep the type-checker happy - dtype: torch.dtype - shape: torch.Size - - # only used when converting a torch.Tensor to a np.ndarray - _dtype_map: dict[torch.dtype, type] = { - torch.float16: np.float16, - torch.float32: np.float32, - } - - # used for safetensors slices - # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046 - # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734 - _dtype_str_map: dict[str, torch.dtype] = { - "F64": torch.float64, - "F32": torch.float32, - "BF16": torch.bfloat16, - "F16": torch.float16, - # "U64": torch.uint64, - "I64": torch.int64, - # "U32": torch.uint32, - "I32": torch.int32, - # "U16": torch.uint16, - "I16": torch.int16, - "U8": torch.uint8, - "I8": torch.int8, - "BOOL": torch.bool, - "F8_E4M3": torch.float8_e4m3fn, - "F8_E5M2": torch.float8_e5m2, - } - - def numpy(self) -> gguf.LazyNumpyTensor: - dtype = self._dtype_map[self.dtype] - return gguf.LazyNumpyTensor( - meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), - args=(self,), - func=(lambda s: s.numpy()) - ) - - @classmethod - def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor: - return torch.empty(size=shape, dtype=dtype, device="meta") - - @classmethod - def from_safetensors_slice(cls, st_slice: Any) -> Tensor: - dtype = cls._dtype_str_map[st_slice.get_dtype()] - shape: tuple[int, ...] = tuple(st_slice.get_shape()) - lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:]) - return cast(torch.Tensor, lazy) - - @classmethod - def __torch_function__(cls, func, types, args=(), kwargs=None): - del types # unused - - if kwargs is None: - kwargs = {} - - if func is torch.Tensor.numpy: - return args[0].numpy() - - return cls._wrap_fn(func)(*args, **kwargs) - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Convert a huggingface model to a GGML compatible file") - parser.add_argument( - "--vocab-only", action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input. 
{ftype} will be replaced by the outtype.", - ) - parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", - ) - parser.add_argument( - "--bigendian", action="store_true", - help="model is executed on big endian machine", - ) - parser.add_argument( - "model", type=Path, - help="directory containing model file", - ) - parser.add_argument( - "--use-temp-file", action="store_true", - help="use the tempfile library while processing (helpful when running out of memory, process killed)", - ) - parser.add_argument( - "--no-lazy", action="store_true", - help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", - ) - parser.add_argument( - "--model-name", type=str, default=None, - help="name of the model", - ) - parser.add_argument( - "--verbose", action="store_true", - help="increase output verbosity", - ) - parser.add_argument( - "--split-max-tensors", type=int, default=0, - help="max tensors in each split", - ) - parser.add_argument( - "--split-max-size", type=str, default="0", - help="max size per split N(M|G)", - ) - parser.add_argument( - "--dry-run", action="store_true", - help="only print out a split plan and exit, without writing any new files", - ) - parser.add_argument( - "--no-tensor-first-split", action="store_true", - help="do not add tensors to the first split (disabled by default)" - ) - parser.add_argument( - "--metadata", type=Path, - help="Specify the path for an authorship metadata override file" - ) - - return parser.parse_args() - - -def split_str_to_n_bytes(split_str: str) -> int: - if split_str.endswith("K"): - n = int(split_str[:-1]) * 1000 - elif split_str.endswith("M"): - n = int(split_str[:-1]) * 1000 * 1000 - elif split_str.endswith("G"): - n = int(split_str[:-1]) * 1000 * 1000 * 1000 - elif split_str.isnumeric(): - n = int(split_str) - else: - raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G") - - if n < 0: - raise ValueError(f"Invalid split size: {split_str}, must be positive") - - return n - - -def main() -> None: - args = parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - dir_model = args.model - - if not dir_model.is_dir(): - logger.error(f'Error: {args.model} is not a directory') - sys.exit(1) - - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "auto": gguf.LlamaFileType.GUESSED, - } - - is_split = args.split_max_tensors > 0 or args.split_max_size != "0" - if args.use_temp_file and is_split: - logger.error("Error: Cannot use temp file when splitting") - sys.exit(1) - - if args.outfile is not None: - fname_out = args.outfile - else: - fname_out = dir_model - - logger.info(f"Loading model: {dir_model.name}") - - hparams = Model.load_hparams(dir_model) - - with torch.inference_mode(): - output_type = ftype_map[args.outtype] - model_architecture = hparams["architectures"][0] - - try: - model_class = Model.from_model_architecture(model_architecture) - except NotImplementedError: - logger.error(f"Model {model_architecture} is not supported") - sys.exit(1) - - model_instance = 
model_class(dir_model=dir_model, ftype=output_type, fname_out=fname_out, - is_big_endian=args.bigendian, use_temp_file=args.use_temp_file, - eager=args.no_lazy, - metadata_override=args.metadata, model_name=args.model_name, - split_max_tensors=args.split_max_tensors, - split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run, - small_first_shard=args.no_tensor_first_split) - - if args.vocab_only: - logger.info("Exporting model vocab...") - model_instance.write_vocab() - logger.info(f"Model vocab successfully exported to {model_instance.fname_out}") - else: - logger.info("Exporting model...") - model_instance.write() - out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out - logger.info(f"Model successfully exported to {out_path}") - - -if __name__ == '__main__': - main() diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py deleted file mode 100755 index d5a2d925e..000000000 --- a/convert_hf_to_gguf_update.py +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# This script downloads the tokenizer models of the specified models from Huggingface and -# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py -# -# This is necessary in order to analyze the type of pre-tokenizer used by the model and -# provide the necessary information to llama.cpp via the GGUF header in order to implement -# the same pre-tokenizer. -# -# ref: https://github.com/ggerganov/llama.cpp/pull/6920 -# -# Instructions: -# -# - Add a new model to the "models" list -# - Run the script with your huggingface token: -# -# python3 convert_hf_to_gguf_update.py <huggingface_token> -# -# - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py -# - Update llama.cpp with the new pre-tokenizer if necessary -# -# TODO: generate tokenizer tests for llama.cpp -# - -import logging -import os -import pathlib -import re - -import requests -import sys -import json - -from hashlib import sha256 -from enum import IntEnum, auto -from transformers import AutoTokenizer - -logging.basicConfig(level=logging.DEBUG) -logger = logging.getLogger("convert_hf_to_gguf_update") -sess = requests.Session() - - -class TOKENIZER_TYPE(IntEnum): - SPM = auto() - BPE = auto() - WPM = auto() - UGM = auto() - - -# TODO: this string has to exercise as much pre-tokenizer functionality as possible -# will be updated with time - contributions welcome -CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? 
We\'Ve a\'lL' - -if len(sys.argv) == 2: - token = sys.argv[1] - if not token.startswith("hf_"): - logger.info("Huggingface token seems invalid") - logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>") - sys.exit(1) -else: - logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>") - sys.exit(1) - -# TODO: add models here, base models preferred -models = [ - {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", }, - {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", }, - {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", }, - {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", }, - {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", }, - {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", }, - {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", }, - {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", }, - {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", }, - {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", }, - {"name": "stablelm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b", }, - {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", }, - {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", }, - {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", }, - {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", }, - {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", }, - {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM! 
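# To cover a new tokenizer (per the instructions at the top of this script), one would append an entry like the hypothetical example below; the "tokt" field controls which extra files download_model() fetches (SPM/UGM also pull the sentencepiece model file):
#     {"name": "my-model", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/my-org/my-model", },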
- {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", }, - {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, - {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", }, - {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", }, - {"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", }, - {"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Also used for Viking 13B and 33B - {"name": "gemma", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2b", }, - {"name": "gemma-2", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2-9b", }, - {"name": "jais", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/core42/jais-13b", }, - {"name": "t5", "tokt": TOKENIZER_TYPE.UGM, "repo": "https://huggingface.co/google-t5/t5-small", }, - {"name": "codeshell", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", }, - {"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", }, - {"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", }, -] - - -def download_file_with_auth(url, token, save_path): - headers = {"Authorization": f"Bearer {token}"} - response = sess.get(url, headers=headers) - response.raise_for_status() - os.makedirs(os.path.dirname(save_path), exist_ok=True) - with open(save_path, 'wb') as downloaded_file: - downloaded_file.write(response.content) - logger.info(f"File {save_path} downloaded successfully") - - -def download_model(model): - name = model["name"] - repo = model["repo"] - tokt = model["tokt"] - - os.makedirs(f"models/tokenizers/{name}", exist_ok=True) - - files = ["config.json", "tokenizer.json", "tokenizer_config.json"] - - if tokt == TOKENIZER_TYPE.SPM: - files.append("tokenizer.model") - - if tokt == TOKENIZER_TYPE.UGM: - files.append("spiece.model") - - for file in files: - save_path = f"models/tokenizers/{name}/{file}" - if os.path.isfile(save_path): - logger.info(f"{name}: File {save_path} already exists - skipping") - continue - download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path) - - -for model in models: - try: - download_model(model) - except Exception as e: - logger.error(f"Failed to download model {model['name']}. Error: {e}") - - -# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function: - -src_ifs = "" -for model in models: - name = model["name"] - tokt = model["tokt"] - - if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM: - continue - - # Skip if the tokenizer folder does not exist or there are other download issues previously - if not os.path.exists(f"models/tokenizers/{name}"): - logger.warning(f"Directory for tokenizer {name} not found. Skipping...") - continue - - # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. 
Error: {e}") - continue # Skip to the next model if the tokenizer can't be loaded - - chktok = tokenizer.encode(CHK_TXT) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.info(f"model: {name}") - logger.info(f"tokt: {tokt}") - logger.info(f"repo: {model['repo']}") - logger.info(f"chktok: {chktok}") - logger.info(f"chkhsh: {chkhsh}") - - # print the "pre_tokenizer" content from the tokenizer.json - with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: - cfg = json.load(f) - normalizer = cfg["normalizer"] - logger.info("normalizer: " + json.dumps(normalizer, indent=4)) - pre_tokenizer = cfg["pre_tokenizer"] - logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) - if "ignore_merges" in cfg["model"]: - logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) - - logger.info("") - - src_ifs += f" if chkhsh == \"{chkhsh}\":\n" - src_ifs += f" # ref: {model['repo']}\n" - src_ifs += f" res = \"{name}\"\n" - -src_func = f""" - def get_vocab_base_pre(self, tokenizer) -> str: - # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that - # is specific for the BPE pre-tokenizer used by the model - # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer - - chktxt = {repr(CHK_TXT)} - - chktok = tokenizer.encode(chktxt) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.debug(f"chktok: {{chktok}}") - logger.debug(f"chkhsh: {{chkhsh}}") - - res = None - - # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script - # or pull the latest version of the model from Huggingface - # don't edit the hashes manually! 
-{src_ifs} - if res is None: - logger.warning("\\n") - logger.warning("**************************************************************************************") - logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!") - logger.warning("** There are 2 possible reasons for this:") - logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") - logger.warning("** - the pre-tokenization config has changed upstream") - logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") - logger.warning("**") - logger.warning(f"** chkhsh: {{chkhsh}}") - logger.warning("**************************************************************************************") - logger.warning("\\n") - raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()") - - logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}") - logger.debug(f"chkhsh: {{chkhsh}}") - - return res -""" - -convert_py_pth = pathlib.Path("convert_hf_to_gguf.py") -convert_py = convert_py_pth.read_text(encoding="utf-8") -convert_py = re.sub( - r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)", - lambda m: m.group(1) + src_func + m.group(3), - convert_py, - flags=re.DOTALL | re.MULTILINE, -) - -convert_py_pth.write_text(convert_py, encoding="utf-8") - -logger.info("+++ convert_hf_to_gguf.py was updated") - -# generate tests for each tokenizer model - -tests = [ - "ied 4 ½ months", - "Führer", - "", - " ", - " ", - " ", - "\t", - "\n", - "\n\n", - "\n\n\n", - "\t\n", - "Hello world", - " Hello world", - "Hello World", - " Hello World", - " Hello World!", - "Hello, world!", - " Hello, world!", - " this is 🦙.cpp", - "w048 7tuijk dsdfhu", - "нещо на Български", - "កាន់តែពិសេសអាចខលចេញ", - "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", - "Hello", - " Hello", - " Hello", - " Hello", - " Hello", - " Hello\n Hello", - " (", - "\n =", - "' era", - "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~", - "!!!!!!", - "3", - "33", - "333", - "3333", - "33333", - "333333", - "3333333", - "33333333", - "333333333", - "Cửa Việt", # llama-bpe fails on this - " discards", - CHK_TXT, -] - -# write the tests to ./models/ggml-vocab-{name}.gguf.inp -# the format is: -# -# test0 -# __ggml_vocab_test__ -# test1 -# __ggml_vocab_test__ -# ... -# - -# with each model, encode all tests and write the results in ./models/ggml-vocab-{name}.gguf.out -# for each test, write the resulting tokens on a separate line - -for model in models: - name = model["name"] - tokt = model["tokt"] - - # Skip if the tokenizer folder does not exist or there are other download issues previously - if not os.path.exists(f"models/tokenizers/{name}"): - logger.warning(f"Directory for tokenizer {name} not found. Skipping...") - continue - - # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Failed to load tokenizer for model {name}. 
Error: {e}") - continue # Skip this model and continue with the next one in the loop - - with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f: - for text in tests: - f.write(f"{text}") - f.write("\n__ggml_vocab_test__\n") - - with open(f"models/ggml-vocab-{name}.gguf.out", "w") as f: - for text in tests: - res = tokenizer.encode(text, add_special_tokens=False) - for r in res: - f.write(f" {r}") - f.write("\n") - - logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*") - -# generate commands for creating vocab files - -logger.info("\nRun the following commands to generate the vocab files for testing:\n") - -for model in models: - name = model["name"] - - print(f"python3 convert_hf_to_gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100 - -logger.info("\n") diff --git a/convert_llama_ggml_to_gguf.py b/convert_llama_ggml_to_gguf.py deleted file mode 100755 index 7b00b4398..000000000 --- a/convert_llama_ggml_to_gguf.py +++ /dev/null @@ -1,450 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import logging -import argparse -import os -import struct -import sys -from enum import IntEnum -from pathlib import Path - -import numpy as np - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -logger = logging.getLogger("ggml-to-gguf") - - -class GGMLFormat(IntEnum): - GGML = 0 - GGMF = 1 - GGJT = 2 - - -class GGMLFType(IntEnum): - ALL_F32 = 0 - MOSTLY_F16 = 1 - MOSTLY_Q4_0 = 2 - MOSTLY_Q4_1 = 3 - MOSTLY_Q4_1_SOME_F16 = 4 - MOSTLY_Q8_0 = 7 - MOSTLY_Q5_0 = 8 - MOSTLY_Q5_1 = 9 - MOSTLY_Q2_K = 10 - MOSTLY_Q3_K_S = 11 - MOSTLY_Q3_K_M = 12 - MOSTLY_Q3_K_L = 13 - MOSTLY_Q4_K_S = 14 - MOSTLY_Q4_K_M = 15 - MOSTLY_Q5_K_S = 16 - MOSTLY_Q5_K_M = 17 - MOSTLY_Q6_K = 18 - - -class Hyperparameters: - def __init__(self): - self.n_vocab = self.n_embd = self.n_mult = self.n_head = 0 - self.n_layer = self.n_rot = self.n_ff = 0 - self.ftype = GGMLFType.ALL_F32 - - def set_n_ff(self, model): - ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight') - assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' - ff_tensor = model.tensors[ff_tensor_idx] - self.n_ff = ff_tensor.dims[1] - - def load(self, data, offset): - ( - self.n_vocab, - self.n_embd, - self.n_mult, - self.n_head, - self.n_layer, - self.n_rot, - ftype, - ) = struct.unpack('<7I', data[offset:offset + (4 * 7)]) - try: - self.ftype = GGMLFType(ftype) - except ValueError: - raise ValueError(f'Invalid ftype {ftype}') - return 4 * 7 - - def __str__(self): - return f'' - - -class Vocab: - def __init__(self, load_scores = True): - self.items = [] - self.load_scores = load_scores - - def load(self, data, offset, n_vocab): - orig_offset = offset - for _ in range(n_vocab): - itemlen = struct.unpack('= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}' - assert name_len < 4096, 'Absurd tensor name length' - quant = gguf.GGML_QUANT_SIZES.get(dtype) - assert quant is not None, 'Unknown tensor type' - (blksize, tysize) = quant - offset += 12 - self.dtype= dtype - self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)]) - offset += 4 * n_dims - self.name = bytes(data[offset:offset + name_len]) - offset += name_len - pad = ((offset + 31) & ~31) - offset if self.use_padding else 0 - offset += pad - n_elems = np.prod(self.dims) - n_bytes = np.int64(np.int64(n_elems) * np.int64(tysize)) // np.int64(blksize) - self.start_offset = offset - self.len_bytes = 
n_bytes - offset += n_bytes - return offset - orig_offset - - -class GGMLModel: - - file_format: GGMLFormat - format_version: int - - def __init__(self): - self.hyperparameters = None - self.vocab = None - self.tensor_map = {} - self.tensors = [] - - def validate_header(self, data, offset): - magic = bytes(data[offset:offset + 4]) - if magic == b'GGUF': - raise ValueError('File is already in GGUF format.') - if magic == b'lmgg': - self.file_format = GGMLFormat.GGML - self.format_version = 1 - return 4 - version = struct.unpack('<I', data[offset + 4:offset + 8])[0] - if magic == b'fmgg': - if version != 1: - raise ValueError(f'Cannot handle unexpected GGMF file version {version}') - self.file_format = GGMLFormat.GGMF - self.format_version = version - return 8 - if magic == b'tjgg': - if version < 1 or version > 3: - raise ValueError(f'Cannot handle unexpected GGJT file version {version}') - self.file_format = GGMLFormat.GGJT - self.format_version = version - return 8 - raise ValueError(f"Unexpected file magic {magic!r}! This doesn't look like a GGML format file.") - - def validate_conversion(self, ftype): - err = '' - if (self.file_format < GGMLFormat.GGJT or self.format_version < 2): - if ftype not in (GGMLFType.ALL_F32, GGMLFType.MOSTLY_F16): - err = 'Quantizations changed in GGJTv2. Can only convert unquantized GGML files older than GGJTv2.' - elif (self.file_format == GGMLFormat.GGJT and self.format_version == 2): - if ftype in (GGMLFType.MOSTLY_Q4_0, GGMLFType.MOSTLY_Q4_1, - GGMLFType.MOSTLY_Q4_1_SOME_F16, GGMLFType.MOSTLY_Q8_0): - err = 'Q4 and Q8 quantizations changed in GGJTv3.' - if len(err) > 0: - raise ValueError(f'{err} Sorry, your {self.file_format.name}v{self.format_version} file of type {ftype.name} is not eligible for conversion.') - - def load(self, data, offset): - offset += self.validate_header(data, offset) - hp = Hyperparameters() - offset += hp.load(data, offset) - logger.info(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}') - self.validate_conversion(hp.ftype) - vocab = Vocab(load_scores = self.file_format > GGMLFormat.GGML) - offset += vocab.load(data, offset, hp.n_vocab) - tensors: list[Tensor] = [] - tensor_map = {} - while offset < len(data): - tensor = Tensor(use_padding = self.file_format > GGMLFormat.GGMF) - offset += tensor.load(data, offset) - tensor_map[tensor.name] = len(tensors) - tensors.append(tensor) - self.hyperparameters = hp - self.vocab = vocab - self.tensors = tensors - self.tensor_map = tensor_map - hp.set_n_ff(self) - return offset - - -class GGMLToGGUF: - def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None, special_vocab = None): - hp = ggml_model.hyperparameters - self.model = ggml_model - self.data = data - self.cfg = cfg - self.params_override = params_override - self.vocab_override = vocab_override - self.special_vocab = special_vocab - if params_override is not None: - n_kv_head = params_override.n_head_kv - else: - if cfg.gqa == 1: - n_kv_head = hp.n_head - else: - gqa = float(cfg.gqa) - n_kv_head = None - for x in range(1, 256): - if float(hp.n_head) / float(x) == gqa: - n_kv_head = x - assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" - logger.info(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') - self.n_kv_head = n_kv_head - self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer) - - def save(self): - logger.info('* Preparing to save GGUF file') - gguf_writer = gguf.GGUFWriter( - self.cfg.output, - gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], - use_temp_file = False) - self.add_params(gguf_writer) - self.add_vocab(gguf_writer) - if self.special_vocab is not None: - self.special_vocab.add_to_gguf(gguf_writer) - self.add_tensors(gguf_writer) - logger.info(" 
gguf: write header") - gguf_writer.write_header_to_file() - logger.info(" gguf: write metadata") - gguf_writer.write_kv_data_to_file() - logger.info(" gguf: write tensors") - gguf_writer.write_tensors_to_file() - gguf_writer.close() - - def add_params(self, gguf_writer): - hp = self.model.hyperparameters - cfg = self.cfg - if cfg.desc is not None: - desc = cfg.desc - else: - desc = f'converted from legacy {self.model.file_format.name}v{self.model.format_version} {hp.ftype.name} format' - try: - # Filenames aren't necessarily valid UTF8. - name = cfg.name if cfg.name is not None else cfg.input.name - except UnicodeDecodeError: - name = None - logger.info('* Adding model parameters and KV items') - if name is not None: - gguf_writer.add_name(name) - gguf_writer.add_description(desc) - gguf_writer.add_file_type(int(hp.ftype)) - if self.params_override is not None: - po = self.params_override - assert po.n_embd == hp.n_embd, 'Model hyperparams mismatch' - assert po.n_layer == hp.n_layer, 'Model hyperparams mismatch' - assert po.n_head == hp.n_head, 'Model hyperparams mismatch' - gguf_writer.add_context_length (po.n_ctx) - gguf_writer.add_embedding_length (po.n_embd) - gguf_writer.add_block_count (po.n_layer) - gguf_writer.add_feed_forward_length (po.n_ff) - gguf_writer.add_rope_dimension_count(po.n_embd // po.n_head) - gguf_writer.add_head_count (po.n_head) - gguf_writer.add_head_count_kv (po.n_head_kv) - gguf_writer.add_layer_norm_rms_eps (po.f_norm_eps) - return - gguf_writer.add_context_length(cfg.context_length) - gguf_writer.add_embedding_length(hp.n_embd) - gguf_writer.add_block_count(hp.n_layer) - gguf_writer.add_feed_forward_length(hp.n_ff) - gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head) - gguf_writer.add_head_count(hp.n_head) - gguf_writer.add_head_count_kv(self.n_kv_head) - gguf_writer.add_layer_norm_rms_eps(float(cfg.eps)) - - def add_vocab(self, gguf_writer): - hp = self.model.hyperparameters - gguf_writer.add_tokenizer_model('llama') - gguf_writer.add_tokenizer_pre('default') - tokens = [] - scores = [] - toktypes = [] - if self.vocab_override is not None: - vo = self.vocab_override - logger.info('* Adding vocab item(s)') - for (_, (vbytes, score, ttype)) in enumerate(vo.all_tokens()): - tokens.append(vbytes) - scores.append(score) - toktypes.append(ttype) - assert len(tokens) == hp.n_vocab, \ - f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}' - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - if len(toktypes) > 0: - gguf_writer.add_token_types(toktypes) - return - logger.info(f'* Adding {hp.n_vocab} vocab item(s)') - assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab' - for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): - tt = 1 # Normal - # Special handling for UNK, BOS, EOS tokens. 
- if tokid <= 2: - if tokid == 0: - vbytes = b'<unk>' - tt = 2 - elif tokid == 1: - vbytes = b'<s>' - tt = 3 - else: - vbytes = b'</s>' - tt = 3 - elif len(vbytes) == 0: - tt = 3 # Control - elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: - vbytes = bytes(f'<0x{vbytes[0]:02X}>', encoding = 'UTF-8') - tt = 6 # Byte - else: - vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') - toktypes.append(tt) - tokens.append(vbytes) - scores.append(vscore) - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) - gguf_writer.add_unk_token_id(0) - gguf_writer.add_bos_token_id(1) - gguf_writer.add_eos_token_id(2) - - def add_tensors(self, gguf_writer): - tensor_map = self.name_map - data = self.data - logger.info(f'* Adding {len(self.model.tensors)} tensor(s)') - for tensor in self.model.tensors: - name = str(tensor.name, 'UTF-8') - mapped_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - assert mapped_name is not None, f'Bad name {name}' - tempdims = list(tensor.dims[:]) - if len(tempdims) > 1: - temp = tempdims[1] - tempdims[1] = tempdims[0] - tempdims[0] = temp - gguf_writer.add_tensor( - mapped_name, - data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], - raw_shape = tempdims, - raw_dtype = tensor.dtype) - - -def handle_metadata(cfg, hp): - import examples.convert_legacy_llama as convert - - assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' - hf_config_path = cfg.model_metadata_dir / "config.json" - orig_config_path = cfg.model_metadata_dir / "params.json" - # We pass a fake model here. "original" mode will check the shapes of some - # tensors if information is missing in the .json file: other than that, the - # model data isn't used so this should be safe (at least for now).
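The `LazyTensor.__new__(convert.LazyTensor)` calls below work because `__new__` allocates an instance without running `__init__`, so a tensor object can be faked with nothing but the `.shape` attribute the metadata loaders read. A standalone sketch of the same pattern (class and attribute names hypothetical):

```python
# __new__ allocates the object but skips __init__, so constructor
# requirements (such as real tensor data) never come into play
class Demo:
    def __init__(self, payload):
        self.payload = payload  # normally mandatory

d = Demo.__new__(Demo)  # no payload needed
d.shape = [32000]       # attach only what the consumer will read
print(d.shape)          # [32000] -- d.payload was never set
```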
- fakemodel = { - 'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor), - 'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor), - } - fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab] - fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff] - if hf_config_path.exists(): - params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path) - elif orig_config_path.exists(): - params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path) - else: - raise ValueError('Unable to load metadata') - vocab_path = Path(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir) - vocab_factory = convert.VocabFactory(vocab_path) - vocab, special_vocab = vocab_factory.load_vocab(cfg.vocabtype.split(","), cfg.model_metadata_dir) - convert.check_vocab_size(params, vocab) - return params, vocab, special_vocab - - -def handle_args(): - parser = argparse.ArgumentParser(description = 'Convert GGML models to GGUF') - parser.add_argument('--input', '-i', type = Path, required = True, - help = 'Input GGMLv3 filename') - parser.add_argument('--output', '-o', type = Path, required = True, - help ='Output GGUF filename') - parser.add_argument('--name', - help = 'Set model name') - parser.add_argument('--desc', - help = 'Set model description') - parser.add_argument('--gqa', type = int, default = 1, - help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') - parser.add_argument('--eps', default = '5.0e-06', - help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2') - parser.add_argument('--context-length', '-c', type=int, default = 2048, - help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') - parser.add_argument('--model-metadata-dir', '-m', type = Path, - help ='Load HuggingFace/.pth vocab and metadata from the specified directory') - parser.add_argument("--vocab-dir", type=Path, - help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir") - parser.add_argument("--vocabtype", default="spm,hfft", - help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm,hfft)") - parser.add_argument("--verbose", action="store_true", help="increase output verbosity") - return parser.parse_args() - - -def main(): - cfg = handle_args() - logging.basicConfig(level=logging.DEBUG if cfg.verbose else logging.INFO) - logger.info(f'* Using config: {cfg}') - logger.warning('=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===') - if cfg.model_metadata_dir is None and (cfg.gqa == 1 or cfg.eps == '5.0e-06'): - logger.info('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".') - data = np.memmap(cfg.input, mode = 'r') - model = GGMLModel() - logger.info('* Scanning GGML input file') - offset = model.load(data, 0) # noqa - logger.info(f'* GGML model hyperparameters: {model.hyperparameters}') - vocab_override = None - params_override = None - special_vocab = None - if cfg.model_metadata_dir is not None: - (params_override, vocab_override, special_vocab) = handle_metadata(cfg, model.hyperparameters) - logger.info('!! 
Note: When overriding params the --gqa, --eps and --context-length options are ignored.') - logger.info(f'* Overriding params: {params_override}') - logger.info(f'* Overriding vocab: {vocab_override}') - logger.info(f'* Special vocab: {special_vocab}') - else: - logger.warning('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n') - if model.file_format == GGMLFormat.GGML: - logger.info('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!') - converter = GGMLToGGUF( - model, data, cfg, - params_override = params_override, - vocab_override = vocab_override, - special_vocab = special_vocab - ) - converter.save() - logger.info(f'* Successful completion. Output saved to: {cfg.output}') - - -if __name__ == '__main__': - main() diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py deleted file mode 100755 index a88d0d4a9..000000000 --- a/convert_lora_to_gguf.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -from __future__ import annotations - -from dataclasses import dataclass -import logging -import argparse -import os -import sys -import json -from math import prod -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Sequence, SupportsIndex, cast - -import torch - -if TYPE_CHECKING: - from torch import Tensor - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -# reuse model definitions from convert_hf_to_gguf.py -from convert_hf_to_gguf import LazyTorchTensor, Model - -logger = logging.getLogger("lora-to-gguf") - - -@dataclass -class PartialLoraTensor: - A: Tensor | None = None - B: Tensor | None = None - - -# magic to support tensor shape modifications and splitting -class LoraTorchTensor: - _lora_A: Tensor # (n_rank, row_size) - _lora_B: Tensor # (col_size, n_rank) - _rank: int - - def __init__(self, A: Tensor, B: Tensor): - assert len(A.shape) == len(B.shape) - assert A.shape[-2] == B.shape[-1] - if A.dtype != B.dtype: - A = A.to(torch.float32) - B = B.to(torch.float32) - self._lora_A = A - self._lora_B = B - self._rank = B.shape[-1] - - def get_lora_A_B(self) -> tuple[Tensor, Tensor]: - return (self._lora_A, self._lora_B) - - def __getitem__( - self, - indices: ( - SupportsIndex - | slice - | tuple[SupportsIndex | slice | Tensor, ...] 
# TODO: add ellipsis in the type signature - ), - ) -> LoraTorchTensor: - shape = self.shape - if isinstance(indices, SupportsIndex): - if len(shape) > 2: - return LoraTorchTensor(self._lora_A[indices], self._lora_B[indices]) - else: - raise NotImplementedError # can't return a vector - elif isinstance(indices, slice): - if len(shape) > 2: - return LoraTorchTensor(self._lora_A[indices], self._lora_B[indices]) - else: - return LoraTorchTensor(self._lora_A, self._lora_B[indices]) - elif isinstance(indices, tuple): - assert len(indices) > 0 - if indices[-1] is Ellipsis: - return self[indices[:-1]] - # expand ellipsis - indices = tuple( - u - for v in ( - ( - (slice(None, None) for _ in range(len(indices) - 1)) - if i is Ellipsis - else (i,) - ) - for i in indices - ) - for u in v - ) - - if len(indices) < len(shape): - indices = (*indices, *(slice(None, None) for _ in range(len(indices), len(shape)))) - - # TODO: make sure this is correct - indices_A = ( - *( - ( - j.__index__() % self._lora_A.shape[i] - if isinstance(j, SupportsIndex) - else slice(None, None) - ) - for i, j in enumerate(indices[:-2]) - ), - slice(None, None), - indices[-1], - ) - indices_B = indices[:-1] - return LoraTorchTensor(self._lora_A[indices_A], self._lora_B[indices_B]) - else: - raise NotImplementedError # unknown indice type - - @property - def dtype(self) -> torch.dtype: - assert self._lora_A.dtype == self._lora_B.dtype - return self._lora_A.dtype - - @property - def shape(self) -> tuple[int, ...]: - assert len(self._lora_A.shape) == len(self._lora_B.shape) - return (*self._lora_B.shape[:-1], self._lora_A.shape[-1]) - - def size(self, dim=None): - assert dim is None - return self.shape - - def reshape(self, *shape: int | tuple[int, ...]) -> LoraTorchTensor: - if isinstance(shape[0], tuple): - new_shape: tuple[int, ...] 
= shape[0] - else: - new_shape = cast(tuple[int, ...], shape) - orig_shape = self.shape - if len(new_shape) < 2: - raise NotImplementedError # can't become a vector - - # expand -1 in the shape - if any(dim == -1 for dim in new_shape): - n_elems = prod(orig_shape) - n_new_elems = prod(dim if dim != -1 else 1 for dim in new_shape) - assert n_elems % n_new_elems == 0 - new_shape = (*(dim if dim != -1 else n_elems // n_new_elems for dim in new_shape),) - - if new_shape[-1] != orig_shape[-1]: - raise NotImplementedError # can't reshape the row size trivially - - shape_A = (*(1 for _ in new_shape[:-2]), self._rank, orig_shape[-1]) - shape_B = (*new_shape[:-1], self._rank) - return LoraTorchTensor( - self._lora_A.reshape(shape_A), - self._lora_B.reshape(shape_B), - ) - - def reshape_as(self, other: Tensor) -> LoraTorchTensor: - return self.reshape(*other.shape) - - def view(self, *size: int) -> LoraTorchTensor: - return self.reshape(*size) - - def permute(self, *dims: int) -> LoraTorchTensor: - shape = self.shape - dims = tuple(dim - len(shape) if dim >= 0 else dim for dim in dims) - if dims[-1] == -1: - # TODO: support higher dimensional A shapes bigger than 1 - assert all(dim == 1 for dim in self._lora_A.shape[:-2]) - return LoraTorchTensor(self._lora_A, self._lora_B.permute(*dims)) - if len(shape) == 2 and dims[-1] == -2 and dims[-2] == -1: - return LoraTorchTensor(self._lora_B.permute(*dims), self._lora_A.permute(*dims)) - else: - # TODO: compose the above two - raise NotImplementedError - - def transpose(self, dim0: int, dim1: int) -> LoraTorchTensor: - shape = self.shape - dims = [i for i in range(len(shape))] - dims[dim0], dims[dim1] = dims[dim1], dims[dim0] - return self.permute(*dims) - - def swapaxes(self, axis0: int, axis1: int) -> LoraTorchTensor: - return self.transpose(axis0, axis1) - - def to(self, *args, **kwargs): - return LoraTorchTensor(self._lora_A.to(*args, **kwargs), self._lora_B.to(*args, **kwargs)) - - @classmethod - def __torch_function__(cls, func: Callable, types, args=(), kwargs=None): - del types # unused - - if kwargs is None: - kwargs = {} - - if func is torch.permute: - return type(args[0]).permute(*args, **kwargs) - elif func is torch.reshape: - return type(args[0]).reshape(*args, **kwargs) - elif func is torch.stack: - assert isinstance(args[0], Sequence) - dim = kwargs.get("dim", 0) - assert dim == 0 - return LoraTorchTensor( - torch.stack([a._lora_A for a in args[0]], dim), - torch.stack([b._lora_B for b in args[0]], dim), - ) - elif func is torch.cat: - assert isinstance(args[0], Sequence) - dim = kwargs.get("dim", 0) - assert dim == 0 - if len(args[0][0].shape) > 2: - return LoraTorchTensor( - torch.cat([a._lora_A for a in args[0]], dim), - torch.cat([b._lora_B for b in args[0]], dim), - ) - elif all(torch.equal(args[0][0]._lora_A, t._lora_A) for t in args[0][1:]): - return LoraTorchTensor( - args[0][0]._lora_A, - torch.cat([b._lora_B for b in args[0]], dim), - ) - else: - raise NotImplementedError - else: - raise NotImplementedError - - -def get_base_tensor_name(lora_tensor_name: str) -> str: - base_name = lora_tensor_name.replace("base_model.model.", "") - base_name = base_name.replace(".lora_A.weight", ".weight") - base_name = base_name.replace(".lora_B.weight", ".weight") - return base_name - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Convert a huggingface PEFT LoRA adapter to a GGML compatible file") - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input. 
{ftype} will be replaced by the outtype.", - ) - parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", - ) - parser.add_argument( - "--bigendian", action="store_true", - help="model is executed on big endian machine", - ) - parser.add_argument( - "--no-lazy", action="store_true", - help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", - ) - parser.add_argument( - "--verbose", action="store_true", - help="increase output verbosity", - ) - parser.add_argument( - "--dry-run", action="store_true", - help="only print out what will be done, without writing any new files", - ) - parser.add_argument( - "--base", type=Path, required=True, - help="directory containing base model file", - ) - parser.add_argument( - "lora_path", type=Path, - help="directory containing LoRA adapter file", - ) - - return parser.parse_args() - - -if __name__ == '__main__': - args = parse_args() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "auto": gguf.LlamaFileType.GUESSED, - } - - ftype = ftype_map[args.outtype] - - dir_base_model: Path = args.base - dir_lora: Path = args.lora_path - lora_config = dir_lora / "adapter_config.json" - input_model = dir_lora / "adapter_model.safetensors" - - if args.outfile is not None: - fname_out = args.outfile - else: - # output in the same directory as the model by default - fname_out = dir_lora - - if os.path.exists(input_model): - # lazy import load_file only if lora is in safetensors format. 
- from safetensors.torch import load_file - - lora_model = load_file(input_model, device="cpu") - else: - input_model = os.path.join(dir_lora, "adapter_model.bin") - lora_model = torch.load(input_model, map_location="cpu", weights_only=True) - - # load base model - logger.info(f"Loading base model: {dir_base_model.name}") - hparams = Model.load_hparams(dir_base_model) - with torch.inference_mode(): - try: - model_class = Model.from_model_architecture(hparams["architectures"][0]) - except NotImplementedError: - logger.error(f"Model {hparams['architectures'][0]} is not supported") - sys.exit(1) - - class LoraModel(model_class): - model_arch = model_class.model_arch - - lora_alpha: float - - def __init__(self, *args, dir_lora_model: Path, lora_alpha: float, **kwargs): - - super().__init__(*args, **kwargs) - - self.dir_model_card = dir_lora_model - self.lora_alpha = float(lora_alpha) - - def set_type(self): - self.gguf_writer.add_type(gguf.GGUFType.ADAPTER) - self.gguf_writer.add_string(gguf.Keys.Adapter.TYPE, "lora") - - def set_gguf_parameters(self): - self.gguf_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, self.lora_alpha) - super().set_gguf_parameters() - - def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - tensor_map: dict[str, PartialLoraTensor] = {} - - for name, tensor in lora_model.items(): - if self.lazy: - tensor = LazyTorchTensor.from_eager(tensor) - base_name = get_base_tensor_name(name) - is_lora_a = ".lora_A.weight" in name - is_lora_b = ".lora_B.weight" in name - if not is_lora_a and not is_lora_b: - if ".base_layer.weight" in name: - continue - logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor") - sys.exit(1) - - if base_name in tensor_map: - if is_lora_a: - tensor_map[base_name].A = tensor - else: - tensor_map[base_name].B = tensor - else: - if is_lora_a: - tensor_map[base_name] = PartialLoraTensor(A=tensor) - else: - tensor_map[base_name] = PartialLoraTensor(B=tensor) - - for name, tensor in tensor_map.items(): - assert tensor.A is not None - assert tensor.B is not None - yield (name, cast(torch.Tensor, LoraTorchTensor(tensor.A, tensor.B))) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - dest = super().modify_tensors(data_torch, name, bid) - for dest_name, dest_data in dest: - assert isinstance(dest_data, LoraTorchTensor) - lora_a, lora_b = dest_data.get_lora_A_B() - - yield (dest_name + ".lora_a", lora_a) - yield (dest_name + ".lora_b", lora_b) - - with open(lora_config, "r") as f: - lparams: dict[str, Any] = json.load(f) - - alpha: float = lparams["lora_alpha"] - - model_instance = LoraModel( - dir_base_model, - ftype, - fname_out, - is_big_endian=args.bigendian, - use_temp_file=False, - eager=args.no_lazy, - dry_run=args.dry_run, - dir_lora_model=dir_lora, - lora_alpha=alpha, - ) - - logger.info("Exporting model...") - model_instance.write() - logger.info(f"Model successfully exported to {model_instance.fname_out}") diff --git a/examples/CMakeLists.txt b/core/CMakeLists.txt similarity index 82% rename from examples/CMakeLists.txt rename to core/CMakeLists.txt index 1e10862b2..3c298a0df 100644 --- a/examples/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -6,7 +6,7 @@ find_package(Threads REQUIRED) # ... 
-# examples +# core include_directories(${CMAKE_CURRENT_SOURCE_DIR}) @@ -16,6 +16,4 @@ else() if (GGML_RPC) add_subdirectory(rpc) endif() - if (LLAMA_BUILD_SERVER) - endif() endif() diff --git a/examples/deprecation-warning/README.md b/core/deprecation-warning/README.md similarity index 100% rename from examples/deprecation-warning/README.md rename to core/deprecation-warning/README.md diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/core/deprecation-warning/deprecation-warning.cpp similarity index 100% rename from examples/deprecation-warning/deprecation-warning.cpp rename to core/deprecation-warning/deprecation-warning.cpp diff --git a/examples/main-cmake-pkg/.gitignore b/core/main-cmake-pkg/.gitignore similarity index 100% rename from examples/main-cmake-pkg/.gitignore rename to core/main-cmake-pkg/.gitignore diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/core/main-cmake-pkg/CMakeLists.txt similarity index 100% rename from examples/main-cmake-pkg/CMakeLists.txt rename to core/main-cmake-pkg/CMakeLists.txt diff --git a/examples/main-cmake-pkg/README.md b/core/main-cmake-pkg/README.md similarity index 100% rename from examples/main-cmake-pkg/README.md rename to core/main-cmake-pkg/README.md diff --git a/examples/main/CMakeLists.txt b/core/main/CMakeLists.txt similarity index 100% rename from examples/main/CMakeLists.txt rename to core/main/CMakeLists.txt diff --git a/examples/main/README.md b/core/main/README.md similarity index 100% rename from examples/main/README.md rename to core/main/README.md diff --git a/examples/main/main.cpp b/core/main/main.cpp similarity index 99% rename from examples/main/main.cpp rename to core/main/main.cpp index 08f0b39e4..8b1732a09 100644 --- a/examples/main/main.cpp +++ b/core/main/main.cpp @@ -105,7 +105,7 @@ static void sigint_handler(int signo) { } else { console::cleanup(); printf("\n"); - llama_print_timings(*g_ctx); + antigma_print_timings(*g_ctx); write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens); _exit(130); } @@ -992,7 +992,7 @@ int main(int argc, char ** argv) { llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size()); } - llama_print_timings(ctx); + antigma_print_timings(ctx); write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens); if (ctx_guidance) { llama_free(ctx_guidance); } diff --git a/examples/rpc/CMakeLists.txt b/core/rpc/CMakeLists.txt similarity index 100% rename from examples/rpc/CMakeLists.txt rename to core/rpc/CMakeLists.txt diff --git a/examples/rpc/README.md b/core/rpc/README.md similarity index 100% rename from examples/rpc/README.md rename to core/rpc/README.md diff --git a/examples/rpc/rpc-server.cpp b/core/rpc/rpc-server.cpp similarity index 100% rename from examples/rpc/rpc-server.cpp rename to core/rpc/rpc-server.cpp diff --git a/examples/sycl/CMakeLists.txt b/core/sycl/CMakeLists.txt similarity index 100% rename from examples/sycl/CMakeLists.txt rename to core/sycl/CMakeLists.txt diff --git a/examples/sycl/README.md b/core/sycl/README.md similarity index 100% rename from examples/sycl/README.md rename to core/sycl/README.md diff --git a/examples/sycl/build.sh b/core/sycl/build.sh similarity index 100% rename from examples/sycl/build.sh rename to core/sycl/build.sh diff --git a/examples/sycl/ls-sycl-device.cpp b/core/sycl/ls-sycl-device.cpp similarity index 100% rename from examples/sycl/ls-sycl-device.cpp rename to core/sycl/ls-sycl-device.cpp diff --git a/examples/sycl/run-llama2.sh 
b/core/sycl/run-llama2.sh similarity index 100% rename from examples/sycl/run-llama2.sh rename to core/sycl/run-llama2.sh diff --git a/examples/sycl/win-build-sycl.bat b/core/sycl/win-build-sycl.bat similarity index 100% rename from examples/sycl/win-build-sycl.bat rename to core/sycl/win-build-sycl.bat diff --git a/examples/sycl/win-run-llama2.bat b/core/sycl/win-run-llama2.bat similarity index 100% rename from examples/sycl/win-run-llama2.bat rename to core/sycl/win-run-llama2.bat diff --git a/docs/android.md b/docs/android.md deleted file mode 100644 index cec4358d9..000000000 --- a/docs/android.md +++ /dev/null @@ -1,56 +0,0 @@ - -# Android - -## Build on Android using Termux -[Termux](https://github.com/termux/termux-app#installation) is a way to run `llama.cpp` on an Android device (no root required). -``` -apt update && apt upgrade -y -apt install git make cmake -``` - -It's recommended to move your model inside the `~/` directory for best performance: -``` -cd storage/downloads -mv model.gguf ~/ -``` - -[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`. - -## Building the Project using Android NDK -Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake. - -Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux: -``` -$ mkdir build-android -$ cd build-android -$ export NDK=<your_ndk_directory> -$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod .. -$ make - ``` - -Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (on Android 11+, run the command twice). - -Finally, copy the built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add execute permission: - -(Assuming you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`) -``` -$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/ -$cd /data/data/com.termux/files/home/bin -$chmod +x ./* -``` - -Download the model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/` - -``` -$mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/ -``` - -Now, you can start chatting: -``` -$cd /data/data/com.termux/files/home/bin -$./llama-cli -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml -``` - -Here's a demo of an interactive session running on a Pixel 5 phone: - -https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4 diff --git a/docs/backend/BLIS.md b/docs/backend/BLIS.md deleted file mode 100644 index 35d06bd0f..000000000 --- a/docs/backend/BLIS.md +++ /dev/null @@ -1,67 +0,0 @@ -BLIS Installation Manual ------------------------- - -BLIS is a portable software framework for high-performance BLAS-like dense linear algebra libraries. It has received awards and recognition, including the 2023 James H.
Wilkinson Prize for Numerical Software and the 2020 SIAM Activity Group on Supercomputing Best Paper Prize. BLIS provides a new BLAS-like API and a compatibility layer for traditional BLAS routine calls. It offers features such as an object-based API, a typed API, and BLAS and CBLAS compatibility layers. - -Project URL: https://github.com/flame/blis - -### Prepare: - -Compile BLIS: - -```bash -git clone https://github.com/flame/blis -cd blis -./configure --enable-cblas -t openmp,pthreads auto -# will install to /usr/local/ by default. -make -j -``` - -Install BLIS: - -```bash -sudo make install -``` - -We recommend using OpenMP, since it makes it easier to control which cores are used. - -### llama.cpp compilation - -Makefile: - -```bash -make GGML_BLIS=1 -j -# make GGML_BLIS=1 llama-benchmark-matmult -``` - -CMake: - -```bash -mkdir build -cd build -cmake -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME .. -make -j -``` - -### llama.cpp execution - -According to the BLIS documentation, we can set the following -environment variables to modify the behavior of OpenMP: - -```bash -export GOMP_CPU_AFFINITY="0-19" -export BLIS_NUM_THREADS=14 -``` - -And then run the binaries as normal. - - -### Intel specific issue - -Some users might get an error message saying that `libimf.so` cannot be found. -Please follow this [stackoverflow page](https://stackoverflow.com/questions/70687930/intel-oneapi-2022-libimf-so-no-such-file-or-directory-during-openmpi-compila). - -### Reference: - -1. https://github.com/flame/blis#getting-started -2. https://github.com/flame/blis/blob/master/docs/Multithreading.md diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md deleted file mode 100644 index d36ac0a15..000000000 --- a/docs/backend/SYCL.md +++ /dev/null @@ -1,580 +0,0 @@ -# llama.cpp for SYCL - -- [Background](#background) -- [Recommended Release](#recommended-release) -- [News](#news) -- [OS](#os) -- [Hardware](#hardware) -- [Docker](#docker) -- [Linux](#linux) -- [Windows](#windows) -- [Environment Variable](#environment-variable) -- [Known Issue](#known-issues) -- [Q&A](#qa) -- [TODO](#todo) - -## Background - -**SYCL** is a high-level parallel programming model designed to improve developer productivity when writing code across various hardware accelerators such as CPUs, GPUs, and FPGAs. It is a single-source language designed for heterogeneous computing and based on standard C++17. - -**oneAPI** is an open ecosystem and a standard-based specification, supporting multiple architectures including but not limited to Intel CPUs, GPUs and FPGAs. The key components of the oneAPI ecosystem include: - -- **DPCPP** *(Data Parallel C++)*: The primary oneAPI SYCL implementation, which includes the icpx/icx compilers. -- **oneAPI Libraries**: A set of highly optimized libraries targeting multiple domains *(e.g. oneMKL - Math Kernel Library)*. -- **oneAPI LevelZero**: A high-performance, low-level interface for fine-grained control over Intel iGPUs and dGPUs. -- **Nvidia & AMD Plugins**: These are plugins extending oneAPI's DPCPP support to SYCL on Nvidia and AMD GPU targets. - -### Llama.cpp + SYCL - -The llama.cpp SYCL backend is designed first and foremost to support **Intel GPUs**. Thanks to the cross-platform nature of SYCL, it can also support other vendors' GPUs: Nvidia GPUs today (*AMD GPU support coming*). - -When targeting an **Intel CPU**, it is recommended to use the llama.cpp [Intel oneMKL](README.md#intel-onemkl) backend. - -It has a similar design to the other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc.*.
In the initial work, oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (commercial release: [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose. - -## Recommended Release - -The SYCL backend is occasionally broken by PRs, since there is no online CI for it. - -The following release has been verified to work well: - -|Commit ID|Tag|Release|Verified Platform| -|-|-|-|-| -|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>
MTL Arc GPU/Windows 11/oneAPI 2024.1| - - -## News - -- 2024.5 - - Performance is increased: 34 -> 37 tokens/s for llama-2-7b.Q4_0 on Arc770. - - Arch Linux is verified successfully. - -- 2024.4 - - Support data types: GGML_TYPE_IQ4_NL, GGML_TYPE_IQ4_XS, GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ3_S, GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M. - -- 2024.3 - - Released binary files for Windows. - - A blog post was published: **Run LLM on all Intel GPUs Using llama.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-llama-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-llama-cpp-fd2e2dcbd9bd). - - A new baseline is ready: [tag b2437](https://github.com/ggerganov/llama.cpp/tree/b2437). - - Support multiple cards: **--split-mode**: [none|layer]; [row] is not supported yet and is under development. - - Support assigning the main GPU with **--main-gpu**, replacing $GGML_SYCL_DEVICE. - - Support detecting all level-zero GPUs with the same top **Max compute units**. - - Support OPs - - hardsigmoid - - hardswish - - pool2d - -- 2024.1 - - Created the SYCL backend for Intel GPUs. - - Support for Windows builds - -## OS - -| OS | Status | Verified | -|---------|---------|------------------------------------------------| -| Linux | Support | Ubuntu 22.04, Fedora Silverblue 39, Arch Linux | -| Windows | Support | Windows 11 | - - -## Hardware - -### Intel GPU - -**Verified devices** - -| Intel GPU | Status | Verified Model | -|-------------------------------|---------|---------------------------------------| -| Intel Data Center Max Series | Support | Max 1550, 1100 | -| Intel Data Center Flex Series | Support | Flex 170 | -| Intel Arc Series | Support | Arc 770, 730M, Arc A750 | -| Intel built-in Arc GPU | Support | built-in Arc GPU in Meteor Lake | -| Intel iGPU | Support | iGPU in i5-1250P, i7-1260P, i7-1165G7 | - -*Notes:* - -- **Memory** - - The device memory is a limitation when running a large model. The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/llama-cli`. - - - Please make sure the GPU shared memory from the host is large enough to account for the model's size. E.g. *llama-2-7b.Q4_0* requires at least 8.0GB for an integrated GPU and 4.0GB for a discrete GPU. - -- **Execution Unit (EU)** - - If the iGPU has fewer than 80 EUs, the inference speed will likely be too slow for practical use. - -### Other Vendor GPU - -**Verified devices** - -| Nvidia GPU | Status | Verified Model | -|--------------------------|---------|----------------| -| Ampere Series | Support | A100, A4000 | -| Ampere Series *(Mobile)* | Support | RTX 40 Series | - -## Docker -The docker build option is currently limited to *Intel GPU* targets. - -### Build image -```sh -# Using FP16 -docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" -f .devops/llama-cli-intel.Dockerfile . -``` - -*Notes*: - -To build in the default FP32 *(slower than the FP16 alternative)*, you can remove the `--build-arg="GGML_SYCL_F16=ON"` argument from the previous command. - -You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative. - -### Run container - -```sh -# First, find all the DRI cards -ls -la /dev/dri -# Then, pick the card that you want to use (here e.g. /dev/dri/card1).
-docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -``` - -*Notes:* -- Docker has been tested successfully on native Linux. WSL support has not been verified yet. -- You may need to install Intel GPU driver on the **host** machine *(Please refer to the [Linux configuration](#linux) for details)*. - -## Linux - -### I. Setup Environment - -1. **Install GPU drivers** - - - **Intel GPU** - -Intel data center GPUs drivers installation guide and download page can be found here: [Get intel dGPU Drivers](https://dgpu-docs.intel.com/driver/installation.html#ubuntu-install-steps). - -*Note*: for client GPUs *(iGPU & Arc A-Series)*, please refer to the [client iGPU driver installation](https://dgpu-docs.intel.com/driver/client/overview.html). - -Once installed, add the user(s) to the `video` and `render` groups. - -```sh -sudo usermod -aG render $USER -sudo usermod -aG video $USER -``` - -*Note*: logout/re-login for the changes to take effect. - -Verify installation through `clinfo`: - -```sh -sudo apt install clinfo -sudo clinfo -l -``` - -Sample output: - -```sh -Platform #0: Intel(R) OpenCL Graphics - `-- Device #0: Intel(R) Arc(TM) A770 Graphics - -Platform #0: Intel(R) OpenCL HD Graphics - `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49] -``` - -- **Nvidia GPU** - -In order to target Nvidia GPUs through SYCL, please make sure the CUDA/CUBLAS native requirements *-found [here](README.md#cuda)-* are installed. - -2. **Install Intel® oneAPI Base toolkit** - -- **For Intel GPU** - -The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page. - -Please follow the instructions for downloading and installing the Toolkit for Linux, and preferably keep the default installation values unchanged, notably the installation path *(`/opt/intel/oneapi` by default)*. - -Following guidelines/code snippets assume the default installation values. Otherwise, please make sure the necessary changes are reflected where applicable. - -Upon a successful installation, SYCL is enabled for the available intel devices, along with relevant libraries such as oneAPI MKL for intel GPUs. - -- **Adding support to Nvidia GPUs** - -**oneAPI Plugin**: In order to enable SYCL support on Nvidia GPUs, please install the [Codeplay oneAPI Plugin for Nvidia GPUs](https://developer.codeplay.com/products/oneapi/nvidia/download). User should also make sure the plugin version matches the installed base toolkit one *(previous step)* for a seamless "oneAPI on Nvidia GPU" setup. - - -**oneMKL for cuBlas**: The current oneMKL releases *(shipped with the oneAPI base-toolkit)* do not contain the cuBLAS backend. A build from source of the upstream [oneMKL](https://github.com/oneapi-src/oneMKL) with the *cuBLAS* backend enabled is thus required to run it on Nvidia GPUs. - -```sh -git clone https://github.com/oneapi-src/oneMKL -cd oneMKL -cmake -B buildWithCublas -DCMAKE_CXX_COMPILER=icpx -DCMAKE_C_COMPILER=icx -DENABLE_MKLGPU_BACKEND=OFF -DENABLE_MKLCPU_BACKEND=OFF -DENABLE_CUBLAS_BACKEND=ON -DTARGET_DOMAINS=blas -cmake --build buildWithCublas --config Release -``` - - -3. **Verify installation and environment** - -In order to check the available SYCL devices on the machine, please use the `sycl-ls` command. 
-```sh -source /opt/intel/oneapi/setvars.sh -sycl-ls -``` - -- **Intel GPU** - -When targeting an Intel GPU, the user should expect one or more level-zero devices among the available SYCL devices. Please make sure that at least one GPU is present, for instance [`ext_oneapi_level_zero:gpu:0`] in the sample output below: - -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] -[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] -[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50] -[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918] -``` - -- **Nvidia GPU** - -Similarly, users targeting Nvidia GPUs should expect at least one SYCL-CUDA device [`ext_oneapi_cuda:gpu`] as below: -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.12.0.12_195853.xmain-hotfix] -[opencl:cpu:1] Intel(R) OpenCL, Intel(R) Xeon(R) Gold 6326 CPU @ 2.90GHz OpenCL 3.0 (Build 0) [2023.16.12.0.12_195853.xmain-hotfix] -[ext_oneapi_cuda:gpu:0] NVIDIA CUDA BACKEND, NVIDIA A100-PCIE-40GB 8.0 [CUDA 12.2] -``` - -### II. Build llama.cpp - -#### Intel GPU -```sh -# Export relevant ENV variables -source /opt/intel/oneapi/setvars.sh - -# Build LLAMA with MKL BLAS acceleration for Intel GPU - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx - -# Option 2: Use FP16 -cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON - -# build all binaries -cmake --build build --config Release -j -v -``` - -#### Nvidia GPU -```sh -# Export relevant ENV variables -export LD_LIBRARY_PATH=/path/to/oneMKL/buildWithCublas/lib:$LD_LIBRARY_PATH -export LIBRARY_PATH=/path/to/oneMKL/buildWithCublas/lib:$LIBRARY_PATH -export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithCublas/include:$CPLUS_INCLUDE_DIR -export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR - -# Build LLAMA with Nvidia BLAS acceleration through SYCL - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx - -# Option 2: Use FP16 -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON - -# build all binaries -cmake --build build --config Release -j -v - -``` - -### III. Run the inference - -1. Retrieve and prepare model - -You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or simply download the [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as an example. - -2. Enable oneAPI running environment - -```sh -source /opt/intel/oneapi/setvars.sh -``` - -3. List devices information - -Similar to the native `sycl-ls`, the available SYCL devices can be queried as follows: - -```sh -./build/bin/llama-ls-sycl-device -``` -This command lists only the devices of the currently selected SYCL backend. The default backend is level_zero.
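The backend that `llama-ls-sycl-device` reports can be switched with the `ONEAPI_DEVICE_SELECTOR` environment variable, which is covered in the launch section below. A minimal sketch of driving the lister from Python with a backend forced (the binary path and the selector value are assumptions):

```python
# run the device lister with the level-zero backend selected explicitly
import os
import subprocess

env = dict(os.environ, ONEAPI_DEVICE_SELECTOR="level_zero:*")
subprocess.run(["./build/bin/llama-ls-sycl-device"], env=env, check=True)
```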
For example, in a system with two *Intel GPUs* it would look like the following: -``` -found 2 SYCL devices: - -| | | |Compute |Max compute|Max work|Max sub| | -|ID| Device Type| Name|capability|units |group |group |Global mem size| -|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------| -| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136| -| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216| -``` - - -4. Launch inference - -There are two device selection modes: - -- Single device: Use one device target specified by the user. -- Multiple devices: Automatically choose the devices with the same backend. - -In both device selection modes the default SYCL backend is level_zero; you can choose another backend supported by SYCL by setting the environment variable ONEAPI_DEVICE_SELECTOR. - -| Device selection | Parameter | -|------------------|----------------------------------------| -| Single device | --split-mode none --main-gpu DEVICE_ID | -| Multiple devices | --split-mode layer (default) | - -Examples: - -- Use device 0: - -```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 -``` -or run it via the script: - -```sh -./examples/sycl/run_llama2.sh 0 -``` - -- Use multiple devices: - -```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer -``` - -Otherwise, you can run the script: - -```sh -./examples/sycl/run_llama2.sh -``` - -*Notes:* - -- Upon execution, verify the selected device ID(s) in the output log, which can for instance be displayed as follows: - -```sh -detect 1 SYCL GPUs: [0] with top Max compute units:512 -``` -Or -```sh -use 1 SYCL GPUs: [0] with Max compute units:512 -``` - -## Windows - -### I. Setup Environment - -1. Install GPU driver - -The Intel GPU driver installation guide and download page can be found here: [Get intel GPU Drivers](https://www.intel.com/content/www/us/en/products/docs/discrete-gpus/arc/software/drivers.html). - -2. Install Visual Studio - -If you already have a recent version of Microsoft Visual Studio, you can skip this step. Otherwise, please refer to the official download page for [Microsoft Visual Studio](https://visualstudio.microsoft.com/). - -3. Install Intel® oneAPI Base toolkit - -The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page. - -Please follow the instructions for downloading and installing the Toolkit for Windows, and preferably keep the default installation values unchanged, notably the installation path *(`C:\Program Files (x86)\Intel\oneAPI` by default)*. - -The following guidelines/code snippets assume the default installation values. Otherwise, please make sure the necessary changes are reflected where applicable. - -b. Enable oneAPI running environment: - -- Type "oneAPI" in the search bar, then open the `Intel oneAPI command prompt for Intel 64 for Visual Studio 2022` App. - -- On the command prompt, enable the runtime environment with the following: -``` -"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 -``` - -c.
Verify installation - -In the oneAPI command line, run the following to print the available SYCL devices: - -``` -sycl-ls -``` - -There should be one or more *level-zero* GPU devices displayed as **[ext_oneapi_level_zero:gpu]**. Below is an example of such output, detecting an *Intel Iris Xe* GPU as a level-zero SYCL device: - -Output (example): -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] -[opencl:cpu:1] Intel(R) OpenCL, 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] -[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Iris(R) Xe Graphics OpenCL 3.0 NEO [31.0.101.5186] -[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Iris(R) Xe Graphics 1.3 [1.3.28044] -``` - -4. Install build tools - -a. Download & install cmake for Windows: https://cmake.org/download/ (CMake can also be installed from the Visual Studio Installer) -b. Recent versions of Visual Studio install Ninja by default. (If not, please install it manually: https://ninja-build.org/) - - -### II. Build llama.cpp - -On the oneAPI command line window, step into the llama.cpp main directory and run the following: - -``` -@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -G "Ninja" -DGGML_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release - -# Option 2: Or FP16 -cmake -B build -G "Ninja" -DGGML_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DGGML_SYCL_F16=ON - -cmake --build build --config Release -j -``` - -Otherwise, run the `win-build-sycl.bat` wrapper, which encapsulates the instructions above: -```sh -.\examples\sycl\win-build-sycl.bat -``` - -Or, use CMake presets to build: -```sh -cmake --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli - -cmake -DGGML_SYCL_F16=ON --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli - -cmake --preset x64-windows-sycl-debug -cmake --build build-x64-windows-sycl-debug -j --target llama-cli -``` - -Or, you can use Visual Studio to open the llama.cpp folder as a CMake project. Choose one of the sycl CMake presets (`x64-windows-sycl-release` or `x64-windows-sycl-debug`) before you compile the project. - -*Notes:* - -- For a minimal experimental setup, the user can build only the inference executable through `cmake --build build --config Release -j --target llama-cli`. - -### III. Run the inference - -1. Retrieve and prepare model - -You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or simply download the [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as an example. - -2. Enable oneAPI running environment - -On the oneAPI command line window, run the following and step into the llama.cpp directory: -``` -"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 -``` - -3. List devices information - -Similar to the native `sycl-ls`, the available SYCL devices can be queried as follows: - -``` -build\bin\ls-sycl-device.exe -``` - -This command lists only the devices of the currently selected SYCL backend. The default backend is level_zero.
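The device table printed by `ls-sycl-device.exe` (shown below) can also be parsed to pick a sensible `--main-gpu` value automatically, mirroring the "top Max compute units" heuristic llama.cpp logs at startup. A rough sketch, with the column layout assumed to match the example output below:

```python
# pick the level-zero device with the most compute units for --main-gpu
import subprocess

out = subprocess.run(["build\\bin\\ls-sycl-device.exe"],
                     capture_output=True, text=True, check=True).stdout
rows = [line.split("|") for line in out.splitlines()
        if line.startswith("|") and "level_zero" in line]
best = max(rows, key=lambda cols: int(cols[5]))  # "Max compute units" column
print("suggested --main-gpu:", best[1].strip())
```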
For example, in a system with two *Intel GPUs* it would look like the following: -``` -found 2 SYCL devices: -| | | |Compute |Max compute|Max work|Max sub| | -|ID| Device Type| Name|capability|units |group |group |Global mem size| -|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------| -| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136| -| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216| - -``` - - -4. Launch inference - -There are two device selection modes: - -- Single device: Use the one device assigned by the user. The default device id is 0. -- Multiple devices: Automatically choose the devices with the same backend. - -In both device selection modes the default SYCL backend is level_zero; you can choose another backend supported by SYCL by setting the environment variable ONEAPI_DEVICE_SELECTOR. - -| Device selection | Parameter | -|------------------|----------------------------------------| -| Single device | --split-mode none --main-gpu DEVICE_ID | -| Multiple devices | --split-mode layer (default) | - -Examples: - -- Use device 0: - -``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0 -``` - -- Use multiple devices: - -``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer -``` -Otherwise, run the following wrapper script: - -``` -.\examples\sycl\win-run-llama2.bat -``` - -Note: - -- Upon execution, verify the selected device ID(s) in the output log, which can for instance be displayed as follows: - -```sh -detect 1 SYCL GPUs: [0] with top Max compute units:512 -``` -Or -```sh -use 1 SYCL GPUs: [0] with Max compute units:512 -``` - -## Environment Variable - -#### Build - -| Name | Value | Function | -|--------------------|------------------------------------|---------------------------------------------| -| GGML_SYCL | ON (mandatory) | Enable build with SYCL code path. | -| GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA | Set the SYCL target device type. | -| GGML_SYCL_F16 | OFF *(default)* \| ON *(optional)* | Enable FP16 build with SYCL code path. | -| CMAKE_C_COMPILER | icx | Set *icx* compiler for SYCL code path. | -| CMAKE_CXX_COMPILER | icpx *(Linux)*, icx *(Windows)* | Set `icpx/icx` compiler for SYCL code path. | - -#### Runtime - -| Name | Value | Function | -|-------------------|------------------|---------------------------------------------------------------------------------------------------------------------------| -| GGML_SYCL_DEBUG | 0 (default) or 1 | Enable logging via the GGML_SYCL_DEBUG macro | -| ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support querying the GPU's free memory via sycl::aspect::ext_intel_free_memory.
Recommended to use when --split-mode = layer | - -## Known Issues - -- `Split-mode:[row]` is not supported. - -## Q&A - -- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`. - - - Potential cause: oneAPI is not installed or its ENV variables are not set. - - Solution: Install the *oneAPI base toolkit* and enable its ENV through: `source /opt/intel/oneapi/setvars.sh`. - -- General compiler error: - - - Remove the **build** folder or try a clean build. - -- I can **not** see `[ext_oneapi_level_zero:gpu]` after installing the GPU driver on Linux. - - Please double-check with `sudo sycl-ls`. - - If it's present in the list, please add the video and render groups to your user, then **logout/login** or restart your system: - - ``` - sudo usermod -aG render $USER - sudo usermod -aG video $USER - ``` - Otherwise, please double-check the GPU driver installation steps. - -### **GitHub contribution**: -Please add the **[SYCL]** prefix/tag in issue/PR titles to help the SYCL team check/address them without delay. - -## TODO - -- Support row layer split for multiple card runs. diff --git a/docs/build.md b/docs/build.md deleted file mode 100644 index d9d12c467..000000000 --- a/docs/build.md +++ /dev/null @@ -1,340 +0,0 @@ -# Build llama.cpp locally - -**To get the Code:** - -```bash -git clone https://github.com/ggerganov/llama.cpp -cd llama.cpp -``` - -In order to build llama.cpp you have four different options. - -- Using `make`: - - On Linux or MacOS: - - ```bash - make - ``` - - - On Windows (x86/x64 only, arm64 requires cmake): - - 1. Download the latest Fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). - 2. Extract `w64devkit` on your PC. - 3. Run `w64devkit.exe`. - 4. Use the `cd` command to reach the `llama.cpp` folder. - 5. From here you can run: - ```bash - make - ``` - - - Notes: - - For the `Q4_0_4_4` quantization type build, add the `GGML_NO_LLAMAFILE=1` flag. For example, use `make GGML_NO_LLAMAFILE=1`. - - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel. - - For faster repeated compilation, install [ccache](https://ccache.dev/). - - For debug builds, run `make LLAMA_DEBUG=1` - -- Using `CMake`: - - ```bash - cmake -B build - cmake --build build --config Release - ``` - - **Notes**: - - - For the `Q4_0_4_4` quantization type build, add the `-DGGML_LLAMAFILE=OFF` cmake option. For example, use `cmake -B build -DGGML_LLAMAFILE=OFF`. - - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel. - - For faster repeated compilation, install [ccache](https://ccache.dev/). - - For debug builds, there are two cases: - - 1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag): - - ```bash - cmake -B build -DCMAKE_BUILD_TYPE=Debug - cmake --build build - ``` - - 2. Multi-config generators (`-G` param set to Visual Studio, XCode...): - - ```bash - cmake -B build -G "Xcode" - cmake --build build --config Debug - ``` - - Building for Windows (x86, x64 and arm64) with MSVC or clang as compilers: - - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/de/vs/community/).
In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...): - - Tab Workload: Desktop-development with C++ - - Tab Components (select quickly via search): C++-_CMake_ Tools for Windows, _Git_ for Windows, C++-_Clang_ Compiler for Windows, MS-Build Support for LLVM-Toolset (clang) - - Please remember to always use a Developer Command Prompt / PowerShell for VS2022 for git, build, test - - For Windows on ARM (arm64, WoA) build with: - ```bash - cmake --preset arm64-windows-llvm-release -D GGML_OPENMP=OFF - cmake --build build-arm64-windows-llvm-release - ``` - Note: Building for arm64 could also be done just with MSVC (with the build-arm64-windows-MSVC preset, or the standard CMake build instructions). But MSVC does not support inline ARM assembly-code, used e.g. for the accelerated Q4_0_4_8 CPU kernels. - -- Using `gmake` (FreeBSD): - - 1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics) - 2. Add your user to **video** group - 3. Install compilation dependencies. - - ```bash - sudo pkg install gmake automake autoconf pkgconf llvm15 openblas - - gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4 - ``` - -## Metal Build - -On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU. -To disable the Metal build at compile time use the `GGML_NO_METAL=1` flag or the `GGML_METAL=OFF` cmake option. - -When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line -argument. - -## BLAS Build - -Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS. There are currently several different BLAS implementations available for build and use: - -### Accelerate Framework: - -This is only available on Mac PCs and it's enabled by default. You can just build using the normal instructions. - -### OpenBLAS: - -This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS installed on your machine. - -- Using `make`: - - On Linux: - ```bash - make GGML_OPENBLAS=1 - ``` - - - On Windows: - - 1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). - 2. Download the latest version of [OpenBLAS for Windows](https://github.com/xianyi/OpenBLAS/releases). - 3. Extract `w64devkit` on your pc. - 4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, inside `w64devkit\x86_64-w64-mingw32\lib`. - 5. From the same OpenBLAS zip copy the content of the `include` folder inside `w64devkit\x86_64-w64-mingw32\include`. - 6. Run `w64devkit.exe`. - 7. Use the `cd` command to reach the `llama.cpp` folder. - 8. From here you can run: - - ```bash - make GGML_OPENBLAS=1 - ``` - -- Using `CMake` on Linux: - - ```bash - cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS - cmake --build build --config Release - ``` - -### BLIS - -Check [BLIS.md](./backend/BLIS.md) for more information. - -### SYCL - -SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators. 
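All of the BLAS and GPU backends on this page share the same two-step CMake flow: configure with backend flags, then build. A sketch that scripts a few of the combinations documented here (flag sets copied from the commands on this page; the job count is an arbitrary choice):

```python
# drive the configure-then-build pattern used throughout these docs
import subprocess

BACKEND_FLAGS = {
    "openblas": ["-DGGML_BLAS=ON", "-DGGML_BLAS_VENDOR=OpenBLAS"],
    "cuda":     ["-DGGML_CUDA=ON"],
    "sycl":     ["-DGGML_SYCL=ON", "-DCMAKE_C_COMPILER=icx", "-DCMAKE_CXX_COMPILER=icpx"],
}

def build(backend: str) -> None:
    subprocess.run(["cmake", "-B", "build", *BACKEND_FLAGS[backend]], check=True)
    subprocess.run(["cmake", "--build", "build", "--config", "Release", "-j", "8"], check=True)

build("openblas")
```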
- -llama.cpp based on SYCL is used to **support Intel GPUs** (Data Center Max series, Flex series, Arc series, built-in Arc GPUs and iGPUs). - -For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). - -### Intel oneMKL - -Building through the oneAPI compilers will make the avx_vnni instruction set available for Intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./backend/SYCL.md). - -- Using manual oneAPI installation: - By default, `GGML_BLAS_VENDOR` is set to `Generic`, so if you have already sourced the Intel environment script and assign `-DGGML_BLAS=ON` in cmake, the MKL version of BLAS will automatically be selected. Otherwise, please install oneAPI and follow the steps below: - ```bash - source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation - cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON - cmake --build build --config Release - ``` - -- Using oneAPI docker image: - If you do not want to source the environment vars and install oneAPI manually, you can also build the code using the Intel docker container: [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above. - -Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. - -### CUDA - -This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). - -For Jetson users: if you have a Jetson Orin, you can try this: [Official Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an older model (Nano/TX2), some additional operations are needed before compiling. - -- Using `make`: - ```bash - make GGML_CUDA=1 - ``` -- Using `CMake`: - - ```bash - cmake -B build -DGGML_CUDA=ON - cmake --build build --config Release - ``` - -The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance: - -| Option | Legal values | Default | Description | -|-------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GGML_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. | -| GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs.
-
-### hipBLAS
-
-This provides BLAS acceleration on HIP-supported AMD GPUs.
-Make sure to have ROCm installed.
-You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html#rocm-install-quick).
-
-- Using `make`:
-  ```bash
-  make GGML_HIPBLAS=1
-  ```
-- Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU):
-  ```bash
-  HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
-      cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
-      && cmake --build build --config Release -- -j 16
-  ```
-  On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and the integrated GPU by setting `-DGGML_HIP_UMA=ON`.
-  However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
-
-  Note that if you get the following error:
-  ```
-  clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library
-  ```
-  Try searching for a directory under `HIP_PATH` that contains the file `oclc_abi_version_400.bc`. Then, add `HIP_DEVICE_LIB_PATH=<directory-you-just-found>` to the start of the command, so something like:
-  ```bash
-  HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \
-      HIP_DEVICE_LIB_PATH=<directory-you-just-found> \
-      cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
-      && cmake --build build -- -j 16
-  ```
-
-- Using `make` (example for target gfx1030, build with 16 CPU threads):
-  ```bash
-  make -j16 GGML_HIPBLAS=1 GGML_HIP_UMA=1 AMDGPU_TARGETS=gfx1030
-  ```
-
-- Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU):
-  ```bash
-  set PATH=%HIP_PATH%\bin;%PATH%
-  cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DGGML_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release
-  cmake --build build
-  ```
-  Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100`, which corresponds to the Radeon RX 7900 XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors).
-  Find your GPU version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`.
-
-The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
-If your GPU is not officially supported, you can set the environment variable `HSA_OVERRIDE_GFX_VERSION` to a similar, supported GPU version, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3 (see the sketch after the table).
-The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because the HIP backend shares the code of the cuBLAS version above):
-
-| Option                 | Legal values           | Default | Description |
-|------------------------|------------------------|---------|-------------|
-| GGML_CUDA_DMMV_X       | Positive integer >= 32 | 32      | Number of values in x direction processed by the HIP dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-| GGML_CUDA_MMV_Y        | Positive integer       | 1       | Block size in y direction for the HIP mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
-| GGML_CUDA_KQUANTS_ITER | 1 or 2                 | 2       | Number of values processed per iteration and per HIP thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
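-
-As a run-time sketch for an officially unsupported RDNA2 GPU (the `llama-cli` binary name and the model path are assumptions here):
-
-```bash
-# Treat a gfx1035 iGPU as the supported gfx1030 and restrict inference to GPU 0
-# (binary and model paths assumed):
-HSA_OVERRIDE_GFX_VERSION=10.3.0 HIP_VISIBLE_DEVICES=0 \
-    ./build/bin/llama-cli -m models/7B/ggml-model-q4_0.gguf -ngl 99 -p "Hello"
-```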
-
-### Vulkan
-
-**Windows**
-
-#### w64devkit
-
-Download and extract [w64devkit](https://github.com/skeeto/w64devkit/releases).
-
-Download and install the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home#windows). When selecting components, only the Vulkan SDK Core is required.
-
-Launch `w64devkit.exe` and run the following commands to copy the Vulkan dependencies:
-```sh
-SDK_VERSION=1.3.283.0
-cp /VulkanSDK/$SDK_VERSION/Bin/glslc.exe $W64DEVKIT_HOME/bin/
-cp /VulkanSDK/$SDK_VERSION/Lib/vulkan-1.lib $W64DEVKIT_HOME/x86_64-w64-mingw32/lib/
-cp -r /VulkanSDK/$SDK_VERSION/Include/* $W64DEVKIT_HOME/x86_64-w64-mingw32/include/
-cat > $W64DEVKIT_HOME/x86_64-w64-mingw32/lib/pkgconfig/vulkan.pc <

[...]

-It will then build & run in the debugger for you.
-
-To just execute a test and get back a PASS or FAIL message, run:
-
-```bash
-./scripts/debug-test.sh test-tokenizer
-```
-
-To test in GDB, use the `-g` flag to enable gdb test mode.
-
-```bash
-./scripts/debug-test.sh -g test-tokenizer
-
-# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
->>> b main
-```
-
-To speed up the testing loop, if you know your test number, you can just run it similar to below:
-
-```bash
-./scripts/debug-test.sh test 23
-```
-
-For further reference, use `debug-test.sh -h` to print help.
-
-### How does the script work?
-
-If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.
-
-#### Step 1: Reset and Setup folder context
-
-From the base of this repository, create `build-ci-debug` as our build context:
-
-```bash
-rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
-```
-
-#### Step 2: Setup Build Environment and Compile Test Binaries
-
-Set up and trigger a build in debug mode. You may adapt the arguments as needed, but in this case these are sane defaults:
-
-```bash
-cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
-make -j
-```
-
-#### Step 3: Find all tests available that match the REGEX
-
-The output of this command will give you the command & arguments needed to run GDB.
-
-* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
-* `-N` : "show-only" disables test execution & shows the test commands that you can feed to GDB
-* `-V` : verbose mode
-
-```bash
-ctest -R "test-tokenizer" -V -N
-```
-
-This may return output similar to below (focusing on key lines to pay attention to):
-
-```bash
-...
-1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
-1: Working Directory: .
-Labels: main
-  Test #1: test-tokenizer-0-llama-spm
-...
-4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
-4: Working Directory: .
-Labels: main
-  Test #4: test-tokenizer-0-falcon
-...
-```
-
-#### Step 4: Identify Test Command for Debugging
-
-For test #1 above, we can tell these two pieces of relevant information:
-* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
-* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`
-
-#### Step 5: Run GDB on test command
-
-Based on the ctest 'test command' report above, we can then run a gdb session via the command below:
-
-```bash
-gdb --args ${Test Binary} ${Test GGUF Model}
-```
-
-Example:
-
-```bash
-gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
-```
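-
-Once inside gdb, a typical loop might look like the following sketch (the breakpoint location is only an example; these are standard gdb commands, not part of the original guide):
-
-```bash
-(gdb) b main    # set a breakpoint at the test's entry point
-(gdb) run       # execute until the breakpoint (or a crash) is hit
-(gdb) bt        # print a backtrace to locate the failing frame
-(gdb) quit
-```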
diff --git a/docs/development/llama-star/idea-arch.key b/docs/development/llama-star/idea-arch.key
deleted file mode 100755
index 3e068e7075c2ebb53a270b57c51e9281618d8f29..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 488591: base85 payload of the deleted binary Keynote file omitted]

xfq~Q?cjyx8dy~E=qp73s%eG!FJZTz2P*ut<}?p zAzxtX<$V!A>`4?nD>Br!0SSd zQ^D+>LLtCta*GnevNHqT}V;2!3yRm^Rb5A&DvU2|WS>(P>7jD;z2`Iq6 zAZeiF{;k@0G`0aI^%S$!Yb+S7Xv4ttj}Jh` zxEkF8lE~e~`n=iWyw=$PK$%oN?Whl7L|kiG#LoQ%n?P?81VAX7F)L(=jqgO2QxOxKFihJC(%Bz{AfJkN3)>dHcjHgUm%2quSQYU!&7OdWlJ0MUtvuQDkJ`KE{`%^NrMhPg7l|sp zanl!`Skza(w+l2M#tfCrB2$ru78`-&^0=q?rT=m~ z{K%)|uDaHx76hMSG;2(I-1+8zF_x{p{n!bv3|uK|Vdx&GgOrUF55YWdzxZvkD7jlz zpeeES-z8l&iS=3n1+utC{igpj7DP<>2)E0GFw*=CxcKy%?s~ZyXA!bBH%d4M4m=-d zADGD}S_lj4{c`ltk^%)0eQV@z-RWJWG6#w>5vT2k1Immr=Fks)!kgqIygpZp2yygRId1L zX;P}Bw&fEfxhokvbJ1NW6_lbPLHb8_0u}+U{ZzjXM1t8fpdekB?gv}>3;2ZY9M!t- zFbB&{`xH5Ihh)8I_20H%^IwxEj8LTviY*!M$PILdfN>o&Bo=%)`mFCbjG&7i2{BcD zkoqja)s;1Dz*B8IujaE?jh%0IMO;$8Qg9Xn&{wYgw_5j)tWSv~=MUQuiF2(wKIOe> z7Qe#X8O>Ul=gg!^%}Pf7_`0dO*XUvbzeEUkWiZQxJ8}&kxS26p367CvV8Ew#{|c}L zIfkzAv%l<7W>#HrP06E{AEEqTm=4b87G5Y`{yXFVl z&doKuwF)WcaH390E@H?2`ybgI2;(0ZQ=7s@h<3MvLb;$*+8mp$9)~YIT>>|s7B~uM z)XmHc2fn&W=1<94J|44qx`J$rWD4(8q*I3zoiEdH0~ojTe9{7(T~*Ge@S=JiUu$G6 zc(6NSp$hN!`{;HHtc-PM*f;CXzQM&Cne7T>uaz-tIIb>p3`=L74WU_ytPmDa4M+>& z*Zgne>cbowEPLGh7*W}g5(iarJ&MH(hu_j+qpJo}vn~I~sDYemXze=D@J^%DmW3)+ z-98V+6z>EG9!q&o<)oZU=fwTpbMyAg8$vrtFn*?DONwP`j;DDs zkLIyW7>?kAehZPbOL^`*aQtbmgF#~#GHd>PYW1MOQ#3$}gKpUxYJE#s7MZqoQaHF_!IP^Z@=I^$5HfpW?H8PUZMx48YLOJz>edr=EZqc{qa=rmL zqISAmjDT04oLO!g9VS~=p!ig2c(9@{Up=3bla*-x#NQ2_wdVdh{`$3ZzN}Nhxv7<= z?4WK&i$#bC&U-0cHE`7-zF6;OlltKWdmqTC5WTq$iiyx3>j z^*%1VdAbakia+zF({yC!^aFHa7(WYOM+2Jst|v%acr;V+b37$qCR*_A-FPJlKDlJ` zfQj5x?pU$Bu&2rLwjYf>H2Q)QZevn-pu8FyqFNS1<)ZQ_70a2YD=5c9n`dXm%FYjc zomR)kgO2||NAT8Yc;`534oBWL6Z{@`DOoI^5MonYkQ@-0y;54(dHDa)bROPp{_p>% z+M=pxOKolKt@ch!?Gk&FwrZD9n@CiZ+C@>j_HMNH-h0)IP0ZQ}MM&iRyFcfAe}6#E z$+=(meZ8*nd_FF|p$Jj>-lj-K?Zx3?TB^{3&3Tw7hKwMNld`wl&?Q6ne-`y=E?2UG zR875$J3Mkr^@+{vdK$-&zAKiuy|vEGpi&^13>sVg49FV*O-$;iAzY9Fh9;)QRp}|p zH29gfoA3t1MN^C&_q&fTj1x82&D=J_)W|;5^8&iO)K!dOd+Z5C?Kst*uW2gk#=1^C z?Ft2_#OvbpL7{j-uZ^};yynV4BBaJvgoPz zdt}DPAw4!O*+mjUx-DP&nhM1Y#6O@97Ygw&-ay5>Aza26ZVYJ>_>0CwYUV^=h`XTP z{QmuWFvZn>aa#y6E7mcvdoSf@aehiq zs)FZ4Xekj*e@*QaXqr=?!H`wZ^J~$9q^c*MqMWhA_(g{aD<31tU2kMFnh?2GDmz`u zU+?yAF_0^9rru4V)hZbLfF(BB!e!A$)g*f5y1~!c;$iwg2=Bp+uUuA zi0^Yq7bNX0*#VtQ{IXrO^?^iYRQ{j1cjo^JySM+5xV3*tl}8*p zZ^SB^_do3xZUOA01$=d3o77>BqGPLiVER*~)ZSLTzMIv*qxkckSdZkqvi3W;wK!QZ z?b-vtBmV3^4xT``i+(xpdwWuN-DYY(LYbjpG;lH}_LyBM2-O?N+p~XYh_#>D3Z?Af zWzwQ=b0DcQEMxSLPFCE!GFy7{x663SHIER(x*0zMXcpS4D{wk}39d$I*4;(Y6gO`% zhdm|?-3CAR8u(X16ICDhO2XoAdhE0>f56i&p6?0F)$HQWmB3`HlMU^2Q#Rp8-K%{ z#-8!AafoEqaMHVL%DDss?y#F%IHq+`QkW^=5H{*b8B_bD;auoG;w;}Ba9T%TYwU3{ zzM}P4#J1Nyik1Q_xf{Ci2YB>;YT`v7^HyDg&lb8RS~Vl%{N`)wx?gQd1H6^SSaN{k zg624g#N=pSrwczY#^%JhRVq)F+CHM24=#-qrE5-3+dP>&xFI{QQo>Tc4(xM+M8hTF zlMrBJOhb4#TbVbB{`2`#dT(sDZ+08RqizlBb6ApY{znx@iad?}B7W(RR>+*}Vw;qh zi?O_=3${Az!Fdv1-UE%1M? 
z`S&&nF#YaD#;h{*_%@AyX;JX;6h8X-wmxM@L%h^OGvTd1Q(efM=6P0sTqCj-c!v-R zsvYelL~0j9T-@y$_cMxgmSAP+g-M40NK)cfJ0dSWZU~XEVF`YJS26m*qCh#SmFv&Z z?f!OB=*(5}fnG)8lD5MOkw5;|eVE_;o3gS)F(?%7B57Shap<8(bo#^GfP&Sg<3hs0 zQOdRD?^jnsQuG2~aDYoo%J``ak!O#rgI&Z~a{2!DP8ajPc9#Z;!q&FJ!U}goBeD^# zhB)2II;I(}So8O(P9_|x1R>z91unEsVSrf8YjmCB%>sV-H*l=Hq>t!7nVT(`5gU2` z!MWC!594klm`SeJk>G$6T)*}`Kbg{xRSvMFG=+V>>CP7HM+>?qhm0oz`nyg!Wm;V7 zM%+iNs(XT|O?TMv)Jh(X@Q2TgkH0}{VSk3H02^*6TVK-C!|KbA6C>9b=8 zokxQN|0epL`ngqauWfz$xs+-fX@N*oZojUCy;vI$gFmXlC2dq{vru!G_|GmJttooC zXYhy}m|fUkQR8;v&Mw=r@#pl{x&O;wE&YJyftaf+HkvL5Kf_fQ`P$RGxtgbn{Q_c7 ziYYXP9)@jJi*@v$Q~o4&H<8CG(&yn2J-$v8=GNW5!>%`Pxtq$||D3*pH79qzA+h6o zCzN8lmBsWr2Nnd|>AK>HEnf5w1 zzu^^#Y}QF0tJL(z%`^gk#@Cu82bxp_N zlCo|Gg)%}=(U%EU@Uxd?#_t_26U?7QBUS`s;N4jag2Gag%gRrnP&a(RiwPCacw&1X zp#&$2jU-6nU+zixBh*s*JKpk|`)DP*OT52tv1q zwO13Hv0m!Uq3c8{8P_MAbgcDyZPo(DABMLR8hknwv9|HynYyXy>(*Ce2Ioh5M|($X z!Ye1uzqJpSTWP0*6TjvEKCJul&rHXODXy>CaebhhN)0Z0#rjKQbu# zQk;Fs{qk3{&Cc5nN1=(-;N0oNz>qmPjxr(%E?Kk(e8YKRbo$S-)K;FR@0)~=2F+xQ zivt_A?EF}pyD#%Yy7aVJ>zj0K+TtDas?P!sko})~40?H~mIxWwm9^uG8akNIGpU^j zrp5jO*SicSDE(hrJ4-S$^DCOnLVbJcY{m>!wUJM%F`$El^`eLk_Siz0DyDS?8GV>U z z_%5KMulkszRQ$GAu82_GdgU1Gcn6pQmul96q2D@~{42f(acQV_UeUBq7dO_NsHXQb z0X>mh0B|fC8>mjmO*QrHpIwUbJ-&16mpzxyC2b0Cz5JT@Y=6mkxW_vgTc1D&}`)tS_Hj zaN9n&S&m(6Dj;&#V(H_uwmRPgK+DxU8`N`uczXDfj&?e^;bZdZ^#zL+sqiN$Z0u|YH*(ueHB#BE-U`0afP+a; zN41&dh5nA|Y57aj;oF`|Pf^@E=Je z5z>y)UpAybK*wY158Z;eczZ2Bnnk@$NaR|WXCw*nnnrcmvSHar7M|s#fBmRRA5bk#Z%{lNaSBA&;^_R$WJXb^^i`bRaq>}tfcnFaeRBJ@3iqo0 zB^`XSo#~Az@+_n;bGjBy^fcT(K=Q=hu8CU4E75_^CZ|rp7Hdcb|KaYGiPpo`9xI3V zu>g^b03q5s2FLk+^senV2HFRxsV047YrZq~d-*7hm{ACCB;B=g=H7hr`vsAK9k+jZ z>{_z-;d?<_Q-h(jhzSbHD<@zOtTcWGG(>K=1}e3MkFq0$UitN?h+V3ir`~zvuJocW z;QZ(3i0J573$hU-@n;K|*7&fqoDf@%-?$jzu`KDN;_DwaO$BN(2FZ7O%68z-#(7$X zjesU97=5U?y3gBfDxHwY#M2VaH@(A}@{Mo14RMW>%5Zx`q|Cqf%g1FQN~6#}xTR{J z?)|XysrzeVH~doE+LfBJr~ws;K_RMrEptzA^*)X_=PJhH8nM%=%2NqsDr>KqA#^yd z5HZSGVR;YGPJ1mPCb=!4xSuXXle1$pE3Xt80fpI=%;Q|UO>dlB5w{gwgZ#fTai^G4 zvbrc#KO+g6H7rRrDwF>QT8QY9cRb);moo!nq;t`_T05)E{q`k`V`Fwkf+3|6_f1HB z|LJS3Hz|NAi2W$v?W^7vB`$-nuM(EKjM?+Kv_7H$JAh&iPNw=?A6Je63|FLi{`u>D znX}A#$px+tc4ZO=Dhs9auv7{ShlOS*1r2dL-S+6wF(Ah_67rE=-@+t-L`N%iDG><@yTLR9$&J%5U_GNr%t-wT+ceC;H=C_)D*exB?hARV=xG1SY*RO7-^{e_9ZnDGnnr_d_Wx=1 zpAbje#BrXEx$rTdDP)IgaV#iE^bb^W1v9_<%i#BUG8ba7j~Fh##cR;NmMYWCck0!*`q0qTM)cUq%~l6J zt3WTKTj_I@lDb?|S$i!5@_W<+G*~rW&aXpm(nFeKZO1vTlnXxfKrnjECW6p)=&i?c zF;T`ov(z#Dh9dlImuf6KX_-G zZ6NB`X)v*bK_obDcmD}r`1+CTC-E~f?@wHQNNgI84r|wks2=apRakfIRw%A&uS%CQ z`SDa{#H~>~d#pHFNuu+X4%zhv>>;iYlSI68jAvRIEm>OJ&6chXl}yylef!Cfq_S`? z9G{@4+WUBLF^!Y!d#7?&#u02L;TrBQhLgp?c`&SXQ_f+(Vh_12-tzMLGUsJZUu!>*Wq#uIeP6K2~9q+GD`$z(7CtivmfosoqMbx7@gzM!|U9I+tY*>a_sfA(u z7v9>&lIdOlUwF?H*6IKOg?} zaPLoSa{9#2CPdm8s?`Buw2A@e#2DHVf&uGv=ovqk4v0>}FaJ0w>{;V<9*xL-}&Gk9T%N@6r9LPgbYVDq%WS_={%)PWa4KHPE>Lb z-CUjSoE!Ht`wr9FDg1=@ZNFf*I?7ZgT{Q>mNk+U3o~Hrb_R{Q8Zj-43^%CN~dnKK&K%9pu-Yf{#t+ zvj6zlYzR@4+FcH%hw-+4S8J_EJ>-P0+#$8Nll`MaAzYyZ`4gxqlG|&}6E=|dbTB&o zS3hd7m1TqH46h#@V${h;>Fl3f4vBHad>B6cjGrU&V#&LZCELccRId~_3tb6a(dbElLAuIwL%6+gsHXlS z#?D?iDM17{4qq4p?gcidmtA;9(Y{i%shPL1;x+lJLIr-T%)>QU85tEcWb`VOs`5Ft6v)OIG+m5!fvDlmav(oD? 
z0-MVv8T?RM6YzdRNCwIRCXb2paJ_`L*`=*LYsi#xE))C4&A)0@$k*&z22FEnnQi9y z@m73pjD0d4Hm>F{x$l?1s4;iR(Pw(WF)weQ({m^ycOaF;89e$={IQnEhGRW9=hjS7 z?9Htc-)iwt&-62zxoR}D8%$2nslsWktx|L10w?=qqULW<2)(PjGw(z^0$S_hYxbEt ziPW%CyfzGUT}%`pjs&JgD^tuno9?2ApE(vTjC%-&_Bv5fe1SeQHUG2r=yhS+>6zUQ z2%`aBfUMVdfaYY(t_u;a`?~7DNP~HK^D4{~tCF-jTG%aC_obf|!FRbl)c;lc=aHA~ zF{QnK?;VRip1fF!nP`2)!=G6NKNd{`3oDZkLS>oOZRCyH_?!98nvUV=3x_$=Z1{PV z4qd1h?+EELo}7)946BU_v`{;4;@a7E3uM3>#$Q-YtlA?rs&$6WzO50fQKp8_8|sw{w|u9?PVi^zq!vUk;JEx!uNbU82lSaKFw{7G8l5K^|-~E3Q0@eN)pG zH{+OOI@QHj(@M?fMttYb3#3LV&C@4hN*iltU66=&;E{7YB6zK?4^PsCy6_3V;}d&( z%WYDOjIP=JV54djYTMjp?~2zGFfM9Eu{27a=_=7T5zNev;6d8-xv_c?JvlZ?W~T5= zM5m@QEzAg4j@Kf9u&AzBLxw4D!FW!Sl$AZ&EfVo^H!~q@R-YoRDr+h0C&;VaK+>re zUdKHD9i6tCV5fd(sL#2|+mwJd{X1=c$}{hesCZ5L(ASDC#B0^#G&sWBnEV4*U!dF% zN0?Lc9TIx5ac0sP>YE9un{6K=j@C{gIlunE{}oS5jag_yu^L zXUx0Lg$L{`v3t^9vc3E8>E|B`SB9#7f2($#f=(ydxufcI~b~FS<;njZ-)}R?e1g+tv6m=+MW< zr!b}T$j!E8|BcfWlKv6JeC>Iv&gG)c8AuXX21V)8ejhH|iRXsB!m{}9Eus$#0xP%6 z&$)lk2aZMu`561&virP<9Fei(z1?pAzs_{D=_)B84qyG!&fuV+_X)i}|K?00fcve% zk#&e58t8Fz7I$T7fN^%~@e5SF-6JzC0p+>@+g07cX^a)m5!KW2@6EbbF<$lmLuG;I zP601C0p#%uh$pmXYNtuQ6ji^j55nR#l7?UTjUs-qKfv^O1gLXRGZqSDpZ>1zIsSdJ z`v)f^P`d*__1q&cbnF==LiG>!X+I}!4LA7R88P3?Dm0TjmnyZ-UszRP9vPz4A7uW` z$K}ns!>L@J9F&f5|2@v|?jNIs7E`Kaj6*7J1HQ20FLhE9dW) zU@J?KykK)p4*;sb-K$si&={-2!yXUG zVp;OnAC|Dah$3Y`Py*zLAYO3}FjZL}uz~2&3XV*XG)HZj=x;WT zA|(9r-3?1!4>uxuJ{U&1yPQO~#cF9QRhIcpw9-JIY_!}nuo$YaO0PE6n)hiXp1DEo z@^DJ|#6biwyLNy?LVj=O1i4CAui2I2{H#u2c_{5`2u0c>D3)7C3dJWyZ`J?wpoZUs zcUZ1=wSUaHtH>=Ct>_cW%%y$SfdVPl>u8QHLkc4?E1$anf<`t9wl8KBj7{ zE2EbK$$do6VQT*)3F0>_<With*fSosd+nFekOyy3!&8LXmtbp%5FjWnDat(`L(=GwDhz?;`-QvS`^<~2Lr`= zl#Cb*t0o!qag+Z}PmD2~$v?vIx-$OI4Gjth{ff&f^G`Lz)Of7A%9u`EiuXSeY+`A& zS>{wWc$dhwWfAv~uEuQnKaxjnd<2q=wRb51XR;Q74z`>OcoLwfi1Gu# z)y=C9yG7h^l^NS#)8fru*ET3m=$E9%fQVmwc3`Waqm+-6t%-a zykJp`t6D=!Ql!Qkqn4Vv1`G3u;mP7Lwt;`=XLoX?b?DV zgDtJ1B*VJHHdm#YO88aq|0u_^*d9QkLj%7<_<)JtzoO~c*BJ$OV@9D&HPe=qZ!Km8 z^DKGkIP(RnJexh;4+Kx+I}z2qeL0cr$|xx)y^DN(xOdM{}xEt-iFAjG~7pZLYmYCe{zPgH;mEM*4c5bMh z(K5I7Oi`j>E7GtsL=)OK{&|(91<;<~yu>a3B;3a?ANGIw7tg+=`snk|GwG8Gckg4V z5F0obRAJ=QP%`1ENBP6Tla4Utn4-L1oY8ewXw>$n96?u$7vmXAep)3(3%bfzu26*B zu;a&MEz=7-1c6^m zpu;s_sq>C!Cfe=vjixrkyRS66qTPV=GA9~AL-0hqiY`R-+Ojv-<02ol&ZYw1^%3t# z5wc->dEHiSpQ)<>qC!t zab+Hczq>2LuV8*_lNmpP?6DrE`eN*9>OJYElQM>Fav4t9y{OjF2&O^XO7z?Pu6XaX z!Bxz!WHYro>T{G`ws#Inh-dT{>;v5;xRnqTu_-xTc4f|ql18eYcdP0iuE|%F#TYb| z+ui&ii<~^haa|lnl)y(IHEM-%jhs5@Q4gh`xo)w5d-*y3?s>wKEghS8zwCXy0U?B7aU3^=y9oSq3@P`C%_Ab$7~dYEL8CLOpbdG6pg1m3d16$=93dlNWpK!nz*_3-qM|Nf%Hz!uUDQb!>)O1CHLV)G32a)ZN{kt0Df# zMvE&f<7*&?>{}I$n~6cF=`$SgHQZ#vMF(Szh?H&^RG*)kCDTT0-1Mi!Dm6a=E+}zY zR_t_;f+p}f3|Xh-kr%~B_8;p1=Goe{$nf-wkD#@B1P8z*hsOLG4hEV~0~oJ8OGswp zN1s?0y|@+ssn9mZ`C0Wd&SByqf+SlIXBU7m+ki$AX>e`au7Fw5r9M!1sK&-}(jlgv zVrQGGfz?}zIJ%;U)4{Cvvx@?Kf_O!%+yxi;@vx(XQ;pU%((EDUC;nXikkG=sgoy|h zmh8FzNQA*S;hpeGWjaLX>o#HMh4&6p)1rqyLLw?D&9#wBXZ^y7{ztC!=z}pS&-;?;K`WtAjS=4rf>^3l{Cw_5f)DBZYx+@alH$%sQtBQq%^z2 zb%p~DVoyFV+QzDBIZKjld&o>w*?9Y9!`jLwSb1I z)as*A=nMYHYPJhH*utAYh(H&TNwWP5-GYs+^L~bncF)rvO;bl8KgGvPAxrv4DjN67 z=GI%=c)Nsb=_U*=Qp#r#rcXcfjvW1S{0AD44cy*G`p|-6V-C*2zc*qQc$Vmh<=Ou&cYNQ526D zl42*c4(8C)ClzEdY__mfI|%T2pqn0K6KoT%H0U}FQ`vreCR#Ex2Ivk$wg;d2uvi2cUm#Wp}7++ z3bM7B@HDofG|nQ^S@r1Q#&^kFLzh2bi_@FloxGqje`{3$mlCj>_aBK&aFcTd2%~cxoh^#x{X+1>i@;t_6-`0ie-J08 zI9pSevv&qY%-%c=x6tb-l4$tK_#o!gHXf6Y=Iw%hW>=mf2xGMu*4;Qd{k*InvTSNq zWXrwsXd&a#RF!@D?CYHnd9#lHNPdI64)q|58l9_5-U#Ww@vgjMvvyaBEkPjYhRadl 
z*Zq7{7zvj^lsi4I!&T?wcEIhpSj@uog8AP(lZF|0bK!zF7GDv0Ar%pBpCu$f;y`!X*P?)bEpw>m>2u!66LU~LzNbyK7#66&XqG+KmWOo?Tp*`*W-1>&XdiWg;^Gk z+j0b37n_5s|BB8x6Db$bh&e|+K#>cH(kk6=<3o9$=KjnrbTIiw~#daQP%(Z>t3ZqY^Xc{I+mV3?=sG4?#k{e_m9Z$_yO2~iBWZQ1;L4Kv}$MH^9k zI9rYJ)5Y?JBi~m_57k#`<|gV}sC7AdfTWj72gX^im~k|Q*Uh&w$SfFzbS|)^t-XL7wcaitN_p_&jq(F0nu1^s+Nx zxEm{M3T%9Sf*=1(ln3gnUns)Ea&UD{r{TPR&;~ek^HkCbO!vIhpW#E^`i$SUoaw6@ zv3C>QzTURJF^_|ELaOWgIj9M5YbT^)756l(Tg-)XqrDhK^j>{T=(Dpav(7xUX3s+7 zCh*S!h$80hul)kjCLCjbgk$nyim#w0Iq|bXWRxVPVg! z`07;^BWhTpZldj_p}x+{H$qRH)~PtbjdGmoW1-Q77wo67>%-+$K}$iU`+ZM(TI8=2Kw^BW&3EzSo(yXHg7EOTpM&NM z60<^!c@*s!HUyL!P4%IB-cjUWye75%;F8+J*(afv!<}IUvEcij3j9|F+p@877Gsf2zi!doS(f_OlcjlSN zuS{vGcMLY2wVL0L>y`IUQy17%s^4wA&Rr%rq>pz33LQ+TcQnh636nGO#t-OG$S2`` zJ$0}P0goGAZ(k7HDDH#Mz(;=J92Q?q!Aw{NOZIgHH92d;kU7wsZ{p^yX`2f+1>cFN zYmBa9bfGvN$8K;q`@_0vnCGpd@)G5_Ghhad|4#|JsOzC=W)6n5~E&DCO@u=?j>DhrV1o#4<`#Q zH~S<_!6V>;Os2-7qCTyOi0lX*%zj5YInZmebnHGPWmp-1Rd4>y*W|?@?-}wGND~3I z$MOo_^-sjwj}`6~jDYY?7>pT&W}nm&vioNB>~x3ZM!Vu-X!eI6%tWB}TWbPg^hI%Y zqVtBwUADKftom0#aW*nqk=BBXi&cwWzCj7}43*b7A}XU}4{_iPQxn%4I_LI*=0X1+c@wkeu)>)D2Bs%c0Nk z_bPK1L4-$>Mvn+Ai|c9cjF^};!YGXfA-FW>7LGy$3Fvp~K}P=SeiM0Qo0NZ`t{-?t z^!>{DXiQb7bD`$n$&nLvJ1>6Ku~{m{wXJxLzjQ$Rh2%eyd>|mV+lrjTNjqoLEd*0c z|65`|BMc!rrP`eqk&9%#SEAjzH%@gYYZ-o@iVDpoJxh74_;(|=?)X%1S9VO_?$`J_ z!8y&W3!P=jKW}^9N~lwYeA!2R<~|5Kw(9{dRAv8YBIV2)gpoMlwK7~Zx=yWDTS?Ya zK{<8&uem1{9qRfGvSeFea7&=`!)+ruSz z^3bi9(~WOTV?hlH<)U$-EfqQM~?HO#w#GTrec<_oyAmIryH z>^bFN-^t3j0tTfCYbzlu)yFC3Db^3E8cGN$$Om$RW<4+Fd4nofBFo*pCuh&`aE&idhUUii_WWVwS;T^##0@~?dI@tD3$O3*XE&=JvFiTXxPCuB(&7i|6bZ0Ux^TrDY zB+zxanClvP!F$zS9(8qo7Ysjc zx>}h}Oe^v;K29^4SB45@q8bsMvdRJk0W9yQe>7j}S7~lnulqDNPo0f3uc+OwFSUdW z*Sh_5{|QaEc#`UERT&xcoJp|(n<%(*x<{MLo!-Yfk(l_+&ROa0wENc-n#=e0XRZvBO@zQzguaH?`e$%|AA zj~uc-nyq&&ewCz@^)0Tj>g(X-iChR`ou=M}khcB^I`4Nr0h9+H0qh=3Gu@};{@#Qt z*CV>#a?P;TP5lV!JL5OLYzbM47miI*a{Sc72jQaia3hLZ*pnk_vo6zZ1@jgySBFSH zPxsA-QE!cPnIG%+(XpUEr6e)g{jCBd;4+7Rk>r*YP8Ab>?pRfu@e1RfRqC?x6#MXt zU4%h(OJw%S7?1x%aU>U5OowHV(OJA$q1S&&LZwbVd5>q((6Uv^*2d=0{Yvrx#P0QK z=T>YS-SB*GpRnw*^f9va-38>{u=@D_6q4(NXIKa@M}l-C?i1+T3UM-K)3q5&qWbMo zdsfkl%@&oLPKWPedIko5W_-U43rW-0k|LDBRIZV2oX$iNj2;qQsqQMuV7_Phc{)Au zRwGGMKdZy6!uL7LuNek}Jo?b$3-#3nC{84`ukX4V@jjjsK8&a?jy!|XTlza_Ji1a! 
zqLjod*-UG+&Au+1lGNU5!y7 z?S7|dsurg)O3>?%kosCb>3q{kb{5kt+_hGfP;N-6EtA~T%u`=izk2kCa2oDkOlPCR zUv4r)&6VV5aFNv1T#slu2hHRAR%@Y$bNwA{T*0cnep95bZkz=|_RVDDu3_oh9RCb| zp&fi)XJ}_K^dX$#T<~rXBb>pXLvOa-VZi$v7I=Lu_Z)l0ey$!EfjQTny zlPRyx_acX(s&~)3+AWEc#NHfofQ=IY!u)Ex_g6TmBCj9n=WTz=Bi(HB{YQ)dpM#0t z+>fM-CeN+aTI!$8cr}6o7TQJgxk+tA`7xF~&97+J%s~5f1Ky->Rbb7nCvr>ta>Y*U z1pD=a>m^Gp!G5$V(@2A)D#70+vq{Q#y2f4PT7hrxD98SqMzxbQReq57OWrpZBf}Py zz>KT2;Mu&w%EEr>DfA<76Xvg0)gSDvje^kqv1hs(oeQ(6Z{GV*b6VB8NpJJ>>Yt2Z zl&7=YbtKzhZld4y4wBL@CHevLK+b%Bzp^xO38U)#+j%nPQG~P2-m~p;9vhi6IWEUq zxG50Ib$|+;W&weQbXeI?oi|`_#Ye#)=tHZ6^QNk>;I?3S9S?nL8xNoaziJ5?!dCIL z=R_;8#4>Bh`;62b4H=AxOr(&PaBB`Ihqsqt+9iS^2(6N#mNC~kQ;8y3< z@kS`VK({KKzrdyW(KoFOa`%5eCW(FhtMPkE5WfXmSEu()E!8t;b z)Mwth2CThJR435B=rqEiuvVD(PQR`#ALzmHAy601EQtO^G|Uy53*;j_2C1n zH7y7ZMbToIFs_z!mnc>j`F7&Ws`M#;nM$UHo8p(l=x?@kt2a!#Qp9O8HocZ|CsI*j ztLG=zh>@K>eG~7!I^}#^Ge(Da{~wgi|GtKaW*Mafikajm@#6m<5m~o#d zG2!XqqptC>P(f%mK;VhXIG9mFY9fzOxJ72z#gC zI6KoKnOg_^&0GNYxh-Sw0yRy%2UIT#jctz!Hq(k)>$2>?)ryyD`a0BLkhn9K6oWVW z2ekNapAy9W)FMdyk?Y63{uAC{a>;UMA75WfiqIOAgo0Q{+_G0vCQU}e&qqD&)j45s zewa%3cu$~BQQm@2i55NW4~>|w&&>t|tP^FGKD~R}mI2^h6>)e^{C(J4oYW6o=Xw=3 zJR^U%$)vS5Q@Uz5Wfmo|wA8n1Z$5)==+b!1#}vM%bu;~HMHEXTI&uPF)wfH^yAGEZ z_{&cUYJJ_>Tm>P1c4(X(wM=A8xjF`L7*&F8Fgt`tb{*-fy>6;#(7{EJ8(NrpVtvx5 z^|_YkI}~4!22Y_~_X|6ZRo*PBhzNlLt6Q$S9G^fKqvKdxCEPcV!|H<7+G^y#We4+a zE#zOw?gC5ENc87cVdgJfWFH2QPY*eHSy5PFFFZ30juDM=uQ)5WDm}|^8lwF7g9!c# ze!~4k=Kk{`n&#E?e^d}s_#b9y$QH)7bX_=*ja+}gP2NrY%#FkOGWqakw#6p2|+Nc!+Z7rADYEw5GD_fxVKFV*{r~A6hOXbRT)oXGk)@mE! zOeDW9MZBhoWtW;hgz;}gBWU+5>G!RMsuq@BwZTBAV=pMfO>V+|t6P8_kbh`y5dikg z*?u&HMolrM6xkit`7tS^L!ZlEhiTNA}0V!Tn!^+mxf3c8I#O(uR7JpxsEOjjrq&5NR zp(@J}a;Nd1K8U>r{s+WPWF?Lv?)U&8So&Iuf6G*=AB)m8UQ;Dpa6Si*9C?)GC+fb3 zS>w8a$G+Ad;sc6of9T^H{NVZI6X&C!N7hfDB42||m2Sjot(jdOBRZ`>YenfmXTq`H z85pWl+k%S-5`Q**r@Si*q^8R}M2+Hd5*SxuwU-fepoV-J^8W$PIgP(>U#E~FB zg-0OAOQZ_w;vRe9bH!M|KmB=k<3d}>wK46V3?HZZ{aR4=O*;3z)cTLa*8|MvS~UeD zEdWE$H(O7pQ>b|7Gwn6tj>su~KBK5GZ%vn^z#JJD26k~lspKCJVe8w`kDqR?@!#{H z3iRQeGgV88z}{d9!?S0dNPB302{n`JZ~*X_{Vw>21i?R2ksAzGNwyTz!;%RLp>_sY zJ-5H_khPIdo3#)WaBnb;Qr+H2gd$B<_v32o0raoWE8HZvnTAf|d^eJs%IbfK=l94>Nra<>21+Rtnu?QmNE?#99QB8j61<X4M4^8r5FMzhfr&87@bDv%JJbqRCu`=6f`CMdv@OrOwl2-;`|75PzP(L^mYdE?S4^Q+T2rpzr|34dsBXUPX|84d zQ*|@94-1P@n@2fF*Y-agkqIz<+(aI4Hvpe++Kw&pzjvXN9TXi@j=OtK*DItF^xYwK zs3B@?c$?hpYsqUGfT5pAvk-A!^9ddCsZyDqz�&)M51K=Ei@q(tL#m!J=s?3IFgJ zhEtXSK`+wCB+b;a<|*;*)ewySI2~~q^b6DxY}AdYjvZmh=AAjy2{toq(n<0uvxacR z%zl*|Gp}Hn|k%n;Jmk)mMs>Xx@@@RX5));w^hVO(-wO{P7~FhohFJD;5N2 ztqSqgS5L3ATeLPt!Dx+$V5`;PoO`X# z@QHG{Qa7r*!Amk(v(#CzTq)_$eS~NSTH6$*an{8vTHrpet2c%_vNVZ@43I8?BIFp) zlmd`zcXrag<56e8P9pM0M3i)!49X2)u>@ir>uMg4*uCz|*a{%+>?6BL_9vVGN{qBt zU~sR!%DLBN(Ak|)xjI-kaXBWfHRB7Kjv!LI|3~so2KNB;j}c2l%yJjJVn*&%6RExH z8tQ?sR0el*>B$S8%JoODE{w{S)&N1(pR6E9J_EIfr2l`x?d5+Y=c%~%AH?x?Qi3bC zzi0Bxdl-Mdt&8C^CHOd%JaXnciP9({8ui78<{?Z2`?GqZG6=d*TRwlh6TsxTy{tJo z;P>q9y=~+_86cMY{*T1{7LEze?gUKNpr71l6m_wt|5(0v4r>&c)29Fi#Gc=mo-r6W zR2!I^1UXA7nHfK`)pqdh6xWn_Ojg%%BfhT{&e|nwl%;W=vlfXoN_X~Ea!~?-r?}&e zt(zh@`ZZs`96x4#G;NE%Ξ$x52WeM_JmqIFki__4Jy5xOBg^l{GYo2vfvYqFRD)ni{e5o}0>^?lO z!}`E>Yd!wKUXxA!-lK(>IxQxW>!zIpY+j5<)xx3ThOFOBuN+k=F~k2y+gkv|6?}Vx zgKN-`pn(Jk1b26W2ZsQ`fHi}Yptn*jyTZ6uDHu-cff~+k>XNp}*wBb54!X#N#{m=FxK5cXL zPVB%IduYS;ce&DtrOl=!T<7lRzLF#9bgOS&+G`TxFi5Kgy4M@d`oN2(DBJ5;UM+5I z!ncG)*-+cR4Mc5`)r;v7NxS<#ff9naLi68ZHcE#sO>$Y<+^j?mlm+3od*IJ0K6ANc zOOHuf-xwlV%LBpEYwg?HI^8GY13*+p$E#c{^aq2+i}&HRcJZgE$cda7E;l210)y29 z)PdsX24{aL36rIKBs#WNBCE<%i@{P-6I}J=}c(=C|nHUDb=#phegy0o`SxF~e~C+3_&01wS&!vj9^s@D1bn 
z*$D--5jZWhY-yXW*n1F-X_wMoh(tgBz65gdAV@1peNuBs8hz z`r$LT+AG<}bCtL(idjA0*%bA+Z(s;d&Ko0V_@5Lj!ahNC z3n5Hi>$rl6q%g#t$Gy&F98`K690i1BsMdh)_qOmV%YACqxLjF#&-T*==X|PJSb_oO zRPc+my1d(I)%yuVv1$mcr@Dm97+D?s|7r4YfN?toF_R0dWGU*&cZgx&-yeyaZmhzN z_k~F$1yc4w-qQqJ3d4DuvM#Foh;N&55lhMIi*(axW|m|~t}Wl4^aV8|r^2?;e$Bp# zJW}rDJlA~jQ-LqoHK>mAK9+eFvgxI7Q(M$GR#juREB!6uI6*?V87Z4qpqs4w(f07* zbJm+Y6+T|=*WmE&=`I9gUGoEPFEQQ1F!Pz&G(+GM$Y_9f%FC)pobaUHGSSj^;dk5P zMw=w-CBevFE}2KGs6GqDA2AH*)4=(F%D*%DhAVf~#D|AV0RGnixFj-6AmGxpd~XGk%~ z%}mBhp?QTgK{@|N3?HTq0-A?GP&5c+aLpADqJ7)-0_t{$h{B>NUN9s#{ zy!lG@3EGr4Pp?JA8rFx^0njeqAJ7oZR2q9&3!zE_og@kdfPy`&{ay?>jQ} zG-o>S?;f#y2-m2~f~k#=i@jr+hxMg%go@Ev$NXLHxt|jCTRC4y zQAbu2wy9goOoh?_rx^iZeL0JA`wtXa+Q@Uq#iSe4$24G*b{4os2Kb(|bElxvF$kst zjFZ$7^=M;VeHoSt{NQ9P#$A_^;{+1cg_%QRGB7IbI2c z(o|IS60fM72IJIHAAxXt^AxQ}63HSdy|h8T2bF3P%jq%L-X2Yt@69b)`^iwox;d-3 zlguuu&R_fbwNLD~-CRE=2av$|CPi$v9T&?D+y0!gITWM^+=1Q2P{2Ul>8VO{7= zlEiuoV@;gu0H-Hb6HC%6z$oBiaGml5|Xqb`L;4dKXb3OJF#Ta7^A%t$`|H& zI%|rg8@$lt0zOv;Os{hC3Ib-tcpt+X+6r6FZ_-Ov;&4A4g{E|er_jaBGY%5|`sUSj zEl@oHJNmk2`?aUtyfFD4Q>x;FKuIY>yXHraS%!i-tCLPD=aVm_K6h{5z588g_r3bG zc(h|+vaQ4arP*e6l=!)%>T+gZC;k+I&ui~iz6~yy+H>~?0e(zSS2D!-76m9O0=}v^2c#4x(gMc8fDzte z+>I72p#*LOQ#E5C=0gKvys zf>3;e#~<)#n-4HQldYh=FF3r{t0(n#^VCa3(&Pq2HH^BDvfCg9#e0UHIm*f7(9wEp#~+b zm{pSimcaGJC;Gs`)ea`=425jh$rTM0RVSM#l`0Jp&gzfIFJ991maHK4drfE>?+ZQv zGh?@W=bM0O1nqn~@5T|JyT|jw|JY%>C?e8SJ7#^0yCwI%oB(7amr(;E;$!#{tn6=$ zxA|p{P=#Dm)g!~+!*lN8`grX|Oo^{oDy|v9o}6^&2YEFB42)p|&wRHhdLLD{@H*HI z*o$m9K-gcFn;a}klj!z91E7Y$Q!nZ z+Fr*Yu!tXC9zuYlUJCR1n;5olMO-63y8+Tf91-rN)sitBN?s(2nO%>5b<@I+gub9C*e|9}FU?lR(k z%%yFEIg2k{oe1m9@-#LZx3Kobtfbtci{D$;7Q8WwDdWs(xJl-~X3P&1|L@SdPat71 zj9mv#bcw(|cyI@IEBcv1^*4)upJ*DNJEW52ByE--`IeJz+G5(9B`Qa-PBpW-(=A8Q z47~~@h5?oj1aP49Elt~$rPeo8nky)7e;6HnZXS3H_&k4qJZbcuTd<_0u%33YeB}wm z7>5V$hCt3gR;uR<4Rre(%~K-ok3I;?@5eOR&p@yDATj~O;O>{;jlw0oxH@&1Ug;-8 zZad*Ko*N{Olc6^oDFa4A^Y@(UhFY$N@k@-QRpwwEBcLOK)evjIQBIcT=4qaFqpKW4 z@=Yd7O+Ca+6TRmLPJe;q>>;%Lt zf2>qP_S)kSaQsYcUDeL;a=t)wiBSfUdzYruC0i&+VRct;1n#B;H0vez-^Pk6#4)a! 
zaX67HCY~>ueW21sxy+19i!XY_@_U7jc7_V>rV-yL3&79zGtTp4oU4yf8OBKt7S?}w z@3K)GKgOdIuJBr3U|{WNfck(~#$!w1lK7)YT`~0leEgFjuYhpBDxkxsg_*;Y&mjmr zKA3tzkCn|&Sh*Hq&6{T$+lmm0%6Fd&S0=cjWJ@5E;N~%Z{f)RN>T_q-NPL)L0M5bv zSEt`W8Tz)EKT|%^()(r0XfZ|a(+aoZ>RhkN_>D7A%a%K6y}tMePd}6*id(X}7_x`< zAaMFhe$|ECfTt_4`i~-VR?05KchhDR97MZrtHq)F(cq1Z4$HCW9%H=FbSHka$fe~E z);s+iAL@*_X+%ZkjBM|h^IM*mFu2Q#N5L11dD?P&0k88wP&yo5=uuB>E}4k<)=i?+ z_zho1U%eBD=t}|vDG(J%xNc>BFQ5f@7Q}&PAua8vkn!(FbCUSG&#MPhEgw$NR`=23 z=qM9?Z~=VXq|rCrEv>53Zo0SCEeP`opq3F(>*vAE1XWyjw#_aBYG$?oI_W>R{1~N_ zbFpzi30t5<;nHtSO&D{T!I{$s$*a{!ek*a_g>g{z)nI-rHNgx*8_+7HefOKVbQeFD z&xO59sc|aMf>BkyGhpIrYQ&A~pg`iqEn$d9f|mlzHb60sCa513adq}JBdYl#g|69_ zk^?pMZoJ^)a-L?#6f*THGHw6*#iViEU2*m}!`@7s9NC*ZZ!&(i8TDDiG?~TReh?jt*-2rDN)XQcaGU=f z7F_qS7SU-MZ3o8qWDFq^Ud+HvHF!7Bv zNRxjV*kaA^ZD)P2(I*wQ%3ON+yM@YS%ACcCSD|f>(g-Z=2iFzd!-OTTSBJ|dt>5~( zAKn%wIG}0pekKptbiqR1Fq?ujVlx%x&N>{>14jaB-+gnb{d{4xm3}&pv1|xDIbtg9 z`Y7H=KAI$Vt6jyC-Ylw#S5vO*yl_p#GUQyEdfD|R;T#wzV(Rh3Zq;DLa@H})%_(C6 zi*}n$1MMqjW8pzD%UY#fJJ?Sk$qeSf7|SF%Q9C*-Ug5*I3+YtEGeF-T3|T=OZf}TH zx44iX=8K?L;&undRslWK=ZfnvHvD5r=zPyQ84r7~&uMj`_3inZ(cgrlkBv5NWk--N zJptht-k(eKTq&&ZFb@#fGdiL^PURRX`K=a8`0kch;={B9kaIb47(2MvF`5r}$Ysux z`uOKRy(s0q`Q})1*y{eLCRnMlRmt44bBjYCl?~$u3dnyQnuNdtHj8L{(|(SdkR{C< zdl+r`LC%{CqKh*X+atP+VK~7!3h#RIn=p~*I2~H2)SL{DZy2kYUnXoGDRL#k9NIH{ zf3_B+k|b-CeJ0vnR zwPeyYDwcn~pZ~j)LjXM{CW(_s|DiUKKxa|tBpkCk=3gM=9rcxC5(*rRM#1I7|`d4vZ?)?-WwmuOroG@40qX|Z1)!F=L2;&H%yGUl_Q z8!!s2D^jY9^Yl=Rj#5AA3s_=m7t{wsn+T+F6*#D{H2;7V>vi4>Sm8f`*eO!Wr%yk= z_5LQ;7LyBTA_%_e`wWn@8nLMVaLlwe zYR}5C-S8IQ2wXqc-G2gQ;8%h-EK{FALk)-~H=RFHS*9(f6|ry6S`bQN&QzfqahnrPOkdq(y82o>OfA|P@gfk4PVkpDXe$sG8F{P)$te;1!l zKmuykj&6>w){agz+-w{m0VxG#)PH3F`VUY4AC_Q-Dta*vREv5Lh?RB>lL~m+LS|Hu zmNr(?P?c5qAoHJ_LMAnFa4aOdDH~;PjAQLlJCrLH65C8c8 z{`|-P58cfDSJ`RSe{CH<7Q_mClk2gs?@;wwB4z)da{sp+wz-9?8E^#-Bn~qt7ofwD zSO8wl)6MB0?+5VDU4WYc`1pUk)qnAY|9I2?;+y|dMoU8)C^HK1G-lQ&<^X>T@GPeP zr+lmb6K`+l{_lJL(LYIXEgZBpfzRhaq6EDM*??R@P9Qsw3CJ5n15yKV|38%H`A>Oi zkRwpq8{`7ivjSOz+<-cgK+eAwEC5~sqL8Pjr?&seQ-pv(HT)nDcFX_dF$#b{gg7A3V2hK9i^+eQLjm5AErBQOq8J2v zrUwEM{{iagyIOK{{pUIot`rD_w)XUN`4$AiNCJT#IG2)J%xt!ANo&2z#9@W3Mv{p1|}9Z4p5+h0ECQ$f`W{S zf`<0*fkg5Lu7gks(THAiN}?01n_#?ge$Ew?kcUYtRntwPF?mkMZR!$?h5h0sDH%Ea zTL#8=Ogy}N`~reP((h$t<>VCHy-JnOj&|S=)SZb#wRd^z!xz`T8w1EIcAI zG3k49N@`kqMt(tI(a++N(z4pR`i91)=9bpqJ-vPX1A{}uQ`0lEbMp&}OPgEUJG*=P ze-93!7nfJpH@C36`+ssFfl&TSEa3gWB>TU}MF_}+jEahait$e_BxKKjf)k>mz2-zG zl2peqaVCDl6@>X*Dj~0?8;h1(NB>Ufj{f}JB zAZg$i{*RD>7YZ^m@UKDv2o(?kAT)He{}B5BB8>kK=0C#v-}D4D@}Cxvkx_vc7CIXG z|GxJBnt56U2C$b;OCVenB;a8}Aq0to5JQEn`YsxmE={C^G+9?0U?!K9H^XWx}qvSCL7x^@Xx#GATNvA z5|ndU3E-uaCDSf2i}vxj-pfzK*>>Dr3s@nqgnJ`miFeVGrtI~Be1@vo z-zzzO!R56b=|7g2^454$U|i^a?tQ0!_V5G}-Vdt5Rge3Sqg-R3t7rJCQR>F9du@#t z8a5TqX@*!ta@IOq5ncMA5aOIeR4ZEJHwUI@8lrtJH{7&gdXMm6qed2m#q-7Lfa8b# z$l=EC%B0_MF}k}?mg=Lcs@?`>mnT!sByy)Ca|~D0ftg~4^T=+ua>>?(@Y=q|4U{0X z7Q&b+Vx|TD3S%eRZ{wK{(0#r>xcY(nib;kJKeO_E8WD5yRu-mfC<)#M(|fzzFQb?v z;e{acEOX}rH5odnUZ0&oa)d{8tUMK9S`Uda~U}#OI-TYQo z(4mDly;SCT4H+G3e{=jcEgGKdz}*q-FwNZkPLO%9Y_j5~e;GGUF~($=DS+)S0==Is zw;9(8#wH$65iBF6ey!POq&u}jVJc?4dH-rwii?jxPQ!WP3n))mn&c2G)7(Pe)l<4V zX2!Dz&sF0KvrEm}{GQuid@4dz5Z_lnuAkSvPvzfR)4(o#Pa;a;3R>={3C8egAR}y# zf62vN^SQ&DHsK&{$A;#e@%vCsCYMCN?Y}FCU9Q>)F&h}c73{5sb^U<2CjZVC_Nmm+t0txCr&(xpMsxPaph^1Za zmo$ssuNk6rtFHIsFVtOxH7T%=&`woqcWPj=lp!O<;*SC=QEigTl-y0d?aqwx)766J zrg%E2(B{;8Is!>h=<4$QeAO57{TrhOwhY}fb+UVVwdS)yUiiE2G%r=5p6(NhHXl~; z7hWxI8kCkDVGM_!T_--1;j`=(x_QzqJ095K!YQT_C@b@eVn|Txe*C2ngQQLNIY}o4t2`dJ@-$e z-+2Og3do4!0g7InM{_tX%ze`_L>#XIPJG@YtM6qh!#!MDH<8AO&z#%d7x&?c5`^Zv 
z_S&B&RrEtKW9e`~;a=6wy6*2~{R%*9Md9H86aU!>Gh@TiE&y4$zYsHq(F3d6S{T-O z{IXa1mf_e$BwT?qcy~WVM_$84%tH-tf4@LpVdp;UKCEvYcVKg!;9!jo=7rPeEu^=y zh&oKP*jxTugc6r>hq}^V*mDCy2@5snzkP>n&Y|Yx`xy-MV$nu6*YFU{%+7;Eq>Oz{ zcAohMj{Pd{{e^jG96^7TsN(ZqC$hJDDY1{I>-@fJx19rjNhuo%{EomKkNevg1Nbny zmw;)7pXaZ~qIRmP!vG?vcc;=rQ$yOAqZnwN;{850oqFiP$a-|fNhderH#M5r8?XE` zqnWEH#}oZ>=gg7rz;tzBUp73E02P+@OBp)pVl@Yq5V2 zgxz0P!QYh%MoG42CEjRqQ>9c4gy5Xl1gi|ZmKzQ*{uAD=aehMmg@9=%(E~nE8|ZpA zopHeqW5VbfVSf%I+BU3@Xp+H?V0FM^A@=)H%qzCuE!6=^+v!}=<&@BobtUiFvHjJ(^n5~Gh z!7jL|7f)mVESRjvf>V7E-eLj=XVvR#u(KX(t6Y_~=3LW2zavlAJz3G=n309#R}J^C zp$uC;xa&1m$hI5x_+JR#VGhSt+W{y!TSKopd6@Tks4`ln^HiReob2h3O%vLHD^~ZI=?O z4=xZuFT8}C0ZwckpFn4Tf^^fEUL5`JrLd`Zb^9YzQ>dVYV6zfw(kDAGVDd3US#P3g zg3?swJM)r2qAiPk)EPiUe9AXCQ5X_1S-BQv8c0F?ywOx$jX`q5zzMs3FaUSUioHXO ztvB>1pRfNmLhpd`2VQlM6+%_=?9JR?N9oRNw5CeeMPBfFz2+LSt{JA%i(A@aj;X*O z0n{hwy6iFQ1Y-2vKLf_s@vlb4@DP`BjWwAXZEZw{E~+I9`sW z!@2ZLG2}6&o&9`jyYJ=AQE@lP;rfsb#xncvbO8?E(L0egH$!R9oKQ8tbaWKtmp4=n zmS$nBAp@G~yxq;3Un>0Koi^OSW+z8Sa&(o=r*XaFi$Ks|Zz%0e3T$(8{oB1~5XM)x zf%CP7(nEqTL?h0nIBmkBe20r)O))iu-_f;aFj}&mxR|)szWGX>fdKaT-Hu}XUi=$r zuvDmL&qr~hH|^6`!VQ@{H=eEJdS5)}Qs;eg!e~{RYLvZAQxcMyCNa3nIPj30t{NSv zQui!r5@v3B8FHN}_q?P1@l<4#iWzQ#k7{5^55hp(S^;nU3?F~gQ!iEztmEv~&0PB{ z>DTA#PFJPr>&cjTmqhXF*QEShIg~jIkNf1W z>Spe6B_at0JVW!H3n>Cs z@pxV^<4)k>KwilzJOU-I9ZxjY7o2l-kWb8!@rq)uw1zO5$)yv}r;<~pfevQHOs**r z&BX+N3n8Kyoi<%0Z~4fUIBX1&EK<#0sHPD%Q=2KXNZ`uKQCE{pdATSv=u9mQAN*dC z6-i!G++n9BL#aWoxmJ=tzU_M+^%kA0SOh^5Cf4|_w?ZnKQd4rMt0YC+9Ty+Hu|AuL z`3ZEDtpJaboViU}@b#TeYTS}Hq?T)$#&kOlSC=Y_1r0Q~y2zJ(2>V8v`{irZY5xGN%ud#=NSx#W z!q?vhw{a86NT-=(qP~LOhF*YZHyM0m_J~pZHu0TDd|`HeeGPrvO?YA%XROYjT*bbK z3@^`WG*|e&_DEZxx$U!gVc(`}p@>M^D)OW_HeFGA_&t`>V6=f@$4`4% zN%m%?-36R%B0p+&i6`}IBTmtr?`3BDe)rIws9BAhr<46)pTy^?P6L*WAWxI5CDQEE|gbu$|hqoL1qk zG~n&%HWVoDusiKh7Su~`oGcssxL=n_>y2c6npxZXTJ zKP`nmY!ydBtj}KdTeZycGU(C@m^Vcd5b?xS@fDBcaQA^<`qPzKw2Oy%MYpmER48wn zU#WNO(zWt8*&8+pIceGRYY@)36|Gf7W1Ro}?Y@03XwW+J)OcxX=cy?k?bdy&MlkPe3^zkc<5ZW$%72Jsd z#Sa18oi`=HatE3$-2U!5{yG@xQ2Vwc`iBlH>jv-#FT{CQen(6{;_M368cB3*SO%v* zP-C8V@Z$N5Pr{c)ZF>Qrj^j5EBlf2AxX^<3SvE`h!nRN>_oP zTY&X80JJnemkTQj>CZUM5E)7Tkda;4*G_+*Wxz#>L6srlwbibpf6f<#mob+DB~wAp zGcZfO3qA)teWOt7^ivb@Og^g|&lHVl3)S2mK`m|NN@vt6_uq^&n!OUh!ykP&Bu7jy ze6#cz;HZ2WkQ_1mONF{s8Eianc9SQfS7e*v#_z#R7r|o7^!@Yj@F6M5XD3Mvnvf6! 
zt(WTAmfkZvXzEvI#-X`bog6lhTIpm$kIb?2mCL<4E-@BAa_0DZnM%}CAG%}DG8q^BHll>K?}CR#M) zJkraQlXbPX!YpQ4;F3N-g3Yx?f`N?2$A5lP97Z?a#~$5DQzK5+N#kGlp{W<#qW3d* zn7S*+6*W3nv+b|#z33yL&9+ee3a%Rnvny07XG04Q=H<2SWn1 z)k$Q4Spy&tV(ImYEOt9r*~{lgA29b?0yL!`V*{qzmOk3Z70uok`TEq|U4UV4jr;WJ zm6Y@!Ead+%>gL%;#cdk=qQlpCmwSF<90j=|h(f?~5p0lh06r(pv$xX)i7!A9RPrOd z*muPH+r+nscJXyEZXuWA+^)*21pp$sAO3gY`;ua;AXPxtBfrmGc0LAFn>1@3`b2k|`;#yAJ*T zlD*a=Nmn|>NI1!vrR3WIaSxazMf&#A3;e}*Uh$cUEJ-LC_qsm&fF)H|g7k~~kFR#G z-MK1nG#mEC%e+ z6kG<3eo~wZ+EQcW#55ZZGZ?<|WOyLb>#WzX$6zimUw# zs;A0VXpcP_?7zvncw-zHvR-EzC>0-gu$f!4{t?2p|48#>mcxnwBkKDUS7~;zaN)rkkfQZquZt>&H2QeQMmF zgDq5FN4>Pea}l7tI#B6SlaF2wlbl~FZGUn8F579M4FlpJP$?6~$&zu=?)a=r^sv3= z*+V?Z0OamSJAG<`+A^l8nU=oxhuenFw30s7v2i-Q0aM7WDu7-cx$oUjzmxwEFI|!l z=&k7hD)cXB?!KOjVLI`sla@v5`mVslpPNe42n&0Sy)~9{JYSu(FZW-2gagz>1*=BX zzqfm-a2nZ{b5NuTZg=QKi8F&Yip9idc>p-+3u>GzqRDmGI9x9b^^`ygwe}U}A>vc+ zjY4$;*v$A_t7G?Bh>uYWE*46-b{4xgsYR?$qQcNy2rt;qx3t~t!5%IEb1S0@{XNSr zzoeI3+??c_^Mh^KiOYG2%17t;2^6Lfr5yV9fIOP$ zA*7@3@Pd--`zAFreA)Iwz6+LM@4nR*WX00YtS{LTcsTCe1w?=ab~s-y+vHEKD6mc} zHMSoOzEmbgHKb?l=Ri`6d`0W$<|j(HgBr<5`8a1lt}wjW{rj<)d^uV>St%TjQ>|DP zU)Qew$A0g&C|m4GKuvbNFCWX|ASmFP4$~fU!@(djQD0x^y-o>S6Z(OiS;miJ`p)J3H(d}qLMR6|?t^eY zkBi`a)az&l_8@)>%NI6$UFW;2Q{&Z-_ zndj-)`BT;<7IWXF1-H1Qi}#-TfexYso3drhG+YfqE>Y`SRpAL2cPDu-*^lz>MZGH=YqOCi2Eigam#$E8SzqvSBSm=ar)^aT3gt$P zbg=k_VD);o#+_q*W;jJq{Z;k;&3fP5y&U{G34BnckfvsaUMoJnoGH_5GAj%IBdLoF z5%+g{LNoHbirv9+5YEq4>NEI2+Y83#f#ATnC>{-0-F?$OL=n=yo)|5#KITB$fu2S7 zo*Ts>uXo)})(-Xs=z&O0F-FD(G2$xS$8OdM45(CfTYbwLsgJU(k|uDvP$p$T)gR7p zGVNIs`~!pz$qXUwP$*lunaW*!4`)(fITu z!;N9``q^HN^}>xpq0DwHh$i4W*yI3mdF>21MhtM+%Xm7*EpEv^T)lU{KZ3Z%G8w6?%4qIiB*%?fUqW?uJF@0GIRyF8}keA zXtVs+mo#*jRv6>5`(m93P?08#@Ef>VQGo5nl=2Rv> zQgaiCf;27uAN71ey&tzkHjl`fO`LQDJXn7@+n1T&kZb|$}lSN!(8Ie7-R+z<(x!8{3 zMZs1rmIBDt9O{l27pomZyc#Ya&{2hh6NXM#72x-3XajF?S;8E~F=?|hlEwn%+JD_p zxjuo8N{!@Nzbo@|;o-b0etzU;k=CB}u=4o^-a2Rh7sGqgR`+B#_fxYa*Yg3I5}cWh zEURMUFZ&|WKJ%X{60Zz!eQ=TVsh4U-&88~1iOUCT1RP>=^e?FA>UjTFo)lTnJYRC+ z;LMeb+%4`{ER40tS72af7wDHfIQhi%r~AcT7j4tsi58owznCU{_YiB=z1wveKRaJx zbRUK8>Fb`FMa60$p z=n8Bkc`5IgGH*^MZtE^z!JUK7mYU#;4wXIqJ9qElKg<(`fPhu{e`F1>;qm$k?E#lI zIO|!?S{3dXH9>Zj6KE2y>r5DTd&v&=)zo%$Tqz78xjnSg9cKDx&scjU;A2TiIj*7o zsxY;!43wX$jRuQ+r7RAwS--mWJI{lzD;|dI4WxMsHfFn_MXK0%~vyyENUE2_sa^WjU{1cWbf=edW?^%zsv5o`EX z(A3Ueedww$KmoCQPvoV!&CoLuqfrro>u_(eG8vbQtgzop`fB+@A-L_TdX^n^<i(*(J~ewQ7mlBBvm&l8RV3I^dB^_*0^RBKi&N-t zGV~o>QK{EiRlILvGVIdBq@W{1Tx3z_y=v(9g50G3Ij%UZDoPR9=Bycl6%d&6Q1{tv0QPYVT0KBBLmHcPLOj5F>Rzy(tSR^W%7|Zs#wvfqr^V zUk^o(q3X{oHkK`ZG>Nkva@YVV3OX-)i|7n_IS;ryle!007UucCx;bsDrms3(uDfR< zak~6!rOdaa4^B9K^#+z{1tspiv`*o7jxg02AAa~nfg<80xM4*(-%m}}DP-e=c3QJJ zhT(d!^k(4syj2M6_>_4h`&n7yk0vSC17k(lz~;d3N8?Slye-GYHvPt#u_cpJmNnjX z-7syh9*5|V+Ym<;g=^4~iFQ`aM&bE6Ut{@%sH< z>ZsQ2rp3vRRIg@UeRbd-?P+GXH7holKjJ?z*R=3<|T=v9sq+}SlkiAQS*F9IQb z69R`mIz!ax?X129u(#v;#v#Kq)lG)0=nun}RLd0AjG1bW%(wV?fZ}+9Fh_gzJJ?f^ z)wsRLaq1XuMR50X|Ir%}R{Gb+YuZHeHh5#_A|UzjlL_$$hR@wkp!UFEA0PJ(w<=HN zi?o5=&kiR+hWp<(zuPC}rc08+YvARNucNpwMIqg+ub0V*f0+r_Qd}IO`Sa)L=h91< z)-<8%H0|de7>mMG#=7(@#5qN*nkxON^PTNzspKa%4JVIHCnTD3guQxoF29)zn>Km9R0sW&Z*NkVwJX%-zhn*1b2^oiA%&RFyjZK0_mM!9$20w>M0m z^r5kARCKW!^&vNHR-E{Q@>w=F8un)M{^`|L!&M7wJ%iBu8t){oyE^5d@FA2hoJUzY zxnJ1#hFE(~MZOzk<&Io4Tvoj6zpyLil-VKw{aA|d>a;_d7QH+g>{Gj>iRqMh4iP}1 z0+c)c?ZqlFYW_2%SAzRRxV@dfbNPWa!+hnLeK^~Sf@n?nZzm1~(o@d3edR`(R-zSy zm|f%zt14CS0D9u_kZ5ii-<}A{GO$dAj|ofS@GgHEcixj-K#7&bhs`L-e@`j$j2>&l ziT}ZgA_DTsusBv{k~c+RCNqeo@O1F`Oc^<4dZQ^~d#g_o_KP=0sDiI@w|bl;n49P& z>rqZ~{u@i)PJ32bJqaVs?Vn2Ad>|^pGM(Bx!qYf({sB?%LF&0meYw_EoGJ~{fz-BY 
zoSBE4iu}G4Be@+17GF09bY@#5OXwoUD0fbvlM8s_OOXwy<0?N}#X!>hwD@o%gPO}g zb;%V~Sv$w6D}Z6>V-B}3tku_78E^K$B;78>5W$1dUhK8#_op20#)6_hzv5^UZ&esD zcF2wKa0z|Cl(mh*u-xI|>2cUg%eawZ#C_ZPD1<-!Z3WNxW9TrBH+>Royvj=kr`3=^ z!Mm32>U|^SEwmZdDmL>7CCm^J6AXHOG;AyZ%k6XYj#k9D*NI}^L%**t=QKT_DGViv+U_MU7S-4aFe2QN6mQv#|}qb@o@9=j`iP`;ZkP~j$5tG zxU|9g7VLNw)c%FPJB4KRTS&HZYl=_s$@L&T;uKAbyrO(^p*(4uafyB{rgqRh*`r_# z^-wbO&dV7%uT7<%-ONYbXA|Sl7i73AC$tIO`zW-cuIrchH^q-JBKEe=Ci>1onqFBp zSt9HjBa2>LaBzqekI%M5NNBBoO9shWFg3s-SMle*gt;bp8M_IcZxb6J8N$o6qB_0} z9dY_B{fyOC3=X9?0RgI37ykHjVVyOz73~Y=s}#}OA|KJt!g%auyDQysy!IkeZOcoT zx*o^@J64p3fb~VA;$Q@w3~ToF_xv@m0BH_?7n0?--|cp)q$cx40rg(eTb&_W{=5vq zPVdWvD1k_tls7+_LK%{XF|aDh7am_SZsl6ATNt`Pj!jayjyO`vbQWn5!{!YOymF=oaW7eZM4P{o zKbb3>X_v966Z?!(tv=P^CyJ@~^5>ORUDTFkK}A(0w~a^qFWetFuMFAXs643R2 z{AFLARE6`3%Y#=KpUv9a&6lPEdqtcQInHA7xJt7i0@Bxh)W6eKmPu=mYjZ^vtk>nM zDmxOOf}xH~x6V_{)v7S|kqfy{1t>Ae_L zQJ_o8I6L;kNa;pJphITa@dR*idb|y|R`@#1?}t@A@7Za0zs&D>DoGExJ&J7%U07i+ z8o@;3cASkCH|^0~n?fRwc-Bz{h?(s+=1cz?QOL_LbpS@Ft}pq-B_{WyaC?w!SAZ|p z#sxGN_vL_}qMw(q)FKH%WDfhCqFn>~hPz*xS}ul+e05wb?dD#jpjyhrF;s0KA+#W7 z)8H2TJ@MaF%U#G+h3;pXcMLP&+~1X4EE7*Nh~8dJ+BTw=)j*0I;q8TK*0;4^qdr;) zx8TI83NQcz%OSb%_@^7x@s-dIeuKScAl*?l>=#MA^QJ4n#k` zetshNQAoK~!ELyz_ZZ{M53-6;x}zf;)nZdl(L|0OVP?gXqccAEax^|E-X}I7+uwG4 zo6pB7WodTlAm1Y99NjaX1MA>VtgqHB({0h|+2U$;#N(!8M;9teSJxFO+}4C=BnfF1 zi(oq|+T2&W+vT*kmlH#hnqofp@<@AW_iK>c4zrS}|M-SBQU@iAK|(ijJzX*Lcr9Zz zsOS#Zi2ERTEi$x5{mdU%e45A}zmS<*YV;`r2<2HvW~OvmOq>BY4jc^^CuCP50Y6#x zxi5CC0?Et0@yP^u*$>Ixz?BQtPv*`qxS5FZa+v`;>_`d2+SZJ9*LOXG3#0=3zkaT+ zuZXU=x{OA*tcic6%vEVn!^-#&4f+}syGXt9MIftaCNL3>|KlX+x@frCiHB~U?S*$! z<86D7ypM~3kyw^LI#c#0dX<6|VYO ztDL1GrJJwX>_-V)+V7!}k;ZR`JCRj@Szqw}-beFWe!A4qcl@lR2?sPmw>y&ew#gqU z33FCUg$>?IZ9cC_@O0?vcOKKLv#d{NC&2ybCAJZFr1yJmv*^VJa_=@n(EZFjr@>-^ zgVq^FGOD?AjIl0--7-I=^;sr^sN@k~!_O4fu;CBew8+&`L{WTCYWOlOPZ_Npj-ojs^z7Yh1J>LHr+mhd@ zfj-M1J$4_V*J=yWN9fg9yb}>T!VOAo{qC3n;c|y8M%^sIsNNI@*{}DeAb%+MTPN#j zwMT!#P8TchI!!plR3#@+{TEqDkIuC0WlligBn$0OEa?p8Wzsb5eI&d(4*p^jK^HL1 zS#e#kF(J zbFt;FS`0+eA&jRt%7EOjyT8P4qHF*#Q>L{;}}R( zv3|ce8c)X|Zk;Qx^U>_2)MY0oG_KQLv#3iPz2o0pU0Z|%V&~RCSRT``lBLQhg6x+@ zd4bkedAEfI%W6HR)MB;+8)AIHCt1Qtz2Af#DpzsxmFm}J=@ot-ANRW?FLT~TJT&9u z2*|lLPISi)T(XypVy_K&}`Zjee-o4(^Kj8i#ORktlJ7=Dwgi(m41UV{edr)hn+443`{pU@*@`Z5tLX|>epe);0Q9bA5wYzy51faD z?LBG3aJDkAv;A_Xoig*$S1}nGz&h2TgaD1&feBos|o41)Y zvY7m8vvNsp7id2;k6_;nlD(;S{HHh6Yf=rjsak@!5pP2qE3 zmX#5JT;`$%2f=5H{aRe5P~)Wi%<{cTB;mblA)5%g*#IVNs;hI}8o2akmbJcveh%QD!e7o><1TR5P!sW;yM0eq@mN0fs56%>BvfU}j155P)#dcaS+EvY;B!J*!5# z8tVN*qD!9Dh1&Pkh>U1aimzkq2*Yb^;dIv(&3vc!svY;c2A_~^+w#=x`*Y<{^$d5}^XpmJ1w!_#uDaYYt{qtiyLU9|7;%fU+aF%eV_f3th&!# zd+{T$fHIh+Zhx`n7IRio@((rYaD|@A!l`SWkVsl5uG388ZwpeO>~|sh0}!rbmkdg; zYfFw6a=x^RzV1R&i7}pY#BAMB#Y5%FCz~Bxn$mO>DrZPkvGvQGSS~gYBUw#s^YyfI zowEWX5mC-#M5#N$3ukvr!zXpplG2r9Colo1oLEgymBpKoFFANb3$uKS{f}tuafAaD z^V`j{R*>EcZ)z3uG1_YP5`(2~4kNReM%z`()5NK)FGe0fob6!H)_)Z%iGxWe zcXJLWHrbv@l?@sQRz#EVg(FrFe?|W>BUwfa1enbPibqLb@|E4$TNJL2UJo3QQ-Y&-3*PlbPSP#_CV3O`1^;7Hu3ZmH>KRAr_v zJkSFBI5@U0pN@>Dw{O-3ym4lXrIZ7gn>?-YMDIp2iCGfhFic<6AYZo$D&P9HmBMOz+uKKoU#}17iiAxRMF|Ey$14Tf|qh z=2hwHft^-cnTEALEgdytu%XX3owFIhpDFXs_?E`l>Jk zROT8lf4aWj1=hj$FBI${shW4D*gWI9|r zM^7oKT;k8geiayhc5SMD3S&XB#Wq;7)S#dQVjV_hWg)^gbXfT>5&QiPS{W) zCT|m}xZ~Z_k-j}Q!X|ilgt!Ddd2b6ks1N;>0z9#VihssZKR4j`JTqvI|HAKS<+uI* zVQ%cdiUuR100$!XvStTlhzZJVuwbRsfEkZVHla*>q ze>TMi17gp0F1v3X8CYle0KXpqavU&te$1j{@=(0p265pG=q$spnTse{f|D%0 za;rw)Zc9s?rMx+PZQ!+xtD+f7+Or7XvT~x2!oEk;)Ju0dI~g?nyl2CL1=$;#V!PGy zVwbyn_*z*;uhwgf$XH$qcMWg}cyn(jY}_9(yRDNHZ5w`aO_%Qj-~p`~_;PPJyEOJ@EjQaj@KAxAC>LDx;#d+&GHvEB~S 
zsnwoqtA}7azZU4CQO*;Y2J3~*)`y?&bm|&gT@=;JNiX(bx~J4hmIi)vi@Y2_$Ut~v zJqH*o{I{n})7IMOQii_sd%cf$x>fb^{zDbVmoNEuYBH?fb6_+RKkm;C-7L3@=>jmu zf=d5p1@ML;+dZ5ikdro7M7#A<`>JAjTfyTOp0h5W!|-o=v&BVZ9sKx9Sr@|(>}fiKHp77z9|{_iAfBA`y9inMkUn7E_rLcnRcBIRcwskY@X3Mm6T>wL0USK>SW>8I@xH zlD|}WjOb?gyH&q_B=)>)FluP1sME}h&2eY4t~@`+@{>~kL?fy^!WzIElX69`of#xD zM3=-zKfa}Q+)L^9erCNLK4nu=lVPOBmvkl`$_G{qdJW7&q3jVKcdsW?ll>`J&QB_j zxj%g*jgf4b*(O0Tz*+dbq{>lWSM@?K$(sAl!T*7dza*PaWHjxOPD6??qlsdMR`lb@HSakq*46K8ls``J^?(szve6;N}iv z&LbO)aO>gzyskqkv(xh899P!Nyc`OJPw?8WF;Tt!=T>0i^}5gXe?)>Fgh$JcS8pRe z??4?%e&jt`O?cquy}X@YxF0H@WF=94si6I>_@v3VnJ4E^s;I@=_>Znfb8E)$_3YuX zib;c|Tl+g~AtD@qP(!&}PjlGfu#*amHI1QS!@?hvxhQ_93$wOmzl-BZOaL4HF`{8RE@k z^MIZyP8>R2;2+Z~+XV8?Gvj9(wB zt}sy-Z9VNWNEu}v+VtqohI$0UVbF`jvjMN2&(E4fUyM-&|9+t%_~l{jQ7e}YpLWL2 zrxo*;4x{;}TL$>qT|0%ro&F=Du{-;6jffSS z5KXKfIa8UoWHr9~gouihEe^gmzcUF7snQs7&Bj_196+7ttUAwSbQ7WumsgjW>d!!b z{x$A33~%SKJ$?J&>_HQ!{7&4dx7SX%@9S^Mf8LVEGT#}Ly$zylhoY2k`q)rfabdF? zJId&gEgaoduSC{|mG!}e54}o@(NrCx`+V790H}qHwN0kv0Y4@R^;6r>;ZEgw9n>S{ z+TrB~&bNdu8?beyUZ{UOjz6`Y&3>;Dx4)VQi;4Cx*gU$+b5?tknY|P@PsM_ry#MO` z5=BW%_Q}J)R8H@(iwFpHfl=AQUmaJ`Ea_`o+|68M&y6O~)zr1V=N@-!-(|Irb+hSX z2k+75AQN|BWSb+oU0QTu?^v|r`<2RP+eBN#mRv+4e$qbZeJJK#YK!~R672*R z@8vIP>3LJNAk_-@OqJd&5>z`rrp!b40QySsk(XZVzV@NB1ZBxzWj_rlSk<6#>g`FP zh}<9Evq>P?%IDISi*!MjY{u_(R>pxWPvWkrVhK;MrR&={oUJooBSI?+`X@z#A7j-+ z7}?zQNhOIB6HQnf(a_!~FJ}SnhAG=PqB1W#0}Xj|PH|#akvGJvjcl7?;ZiK`3neR+ z+ZJce=RQbR@|U}R_!2jDYE^1($(>&ZQrt$H9RDgb{PLu)m(A*V412R#(I-nTdE`rE zhI#n;5yS|qf_S;_FWQVcoLE@dJ?mqcO>s9%NLJ*aUD%v4FWm{j#t@I2z zx+vEMi_AxrkFK|GM)>dcuiYKISyW&?mDZbQBP9rWR(7|{78dW$-S@C|u&?gAbtm@? z;`6_N3|g%Kz3Av4(8=|so_l%ursC0=%aaayzi#R~(hC|--Z|3iHeZZIY>p?)n!mdL zBGTih1&1CHhTUq|hguFAfTqhW+tr;|3a5RVeVgI8^Ir+u=cDRY#Ph0TVnzh&*3tv0 z(E+dSp^5L{wwZ75gPNp1Uvbw{ z@O6l17$I1Zfcsz?CKTR5`)=Eyp4WVNf;&Q;ve$~zYfOv{bJ-2PzMp+_4(e`BhA1A% zbP%Rsy}OvP#x)pV2Rn`f(`Ej7?Rdf!xj^)qT82ci^*Eq6aLmcDPkF(${owY0L~kY> z-TC>;wI_s}4!M$E9CP>J)9IKRdJDHy5KQP+r(4y1##!4NQbQcP^Ra3>IApuuSDJ=` zI|il;K8-;lZ{sunGJ*Q0Sm1_rHDXOqtL|oOW?14bO10m`wyy5V7u4?P{P0R!u0v7r zUl7k7r2oEm`Dh?Oxzoy#yV;^6fHDLBMf?YtJoeS+Ira`G4P*tW&ou0FvVA-8yO8rm)0hhrVR3w3vWKzH~d^Ix6Uz#KJR#yI1L^laL%2T7YcjLKSM zIb&xz3CqPy1Lj@Ye9yP6hg6rI?||OL-gV(g_H((Kgs44v9wDNi#FcG~$#Fi z$$Lnq={NEzGgzESM#(V#%%JF~J?Ul~9PjWi@lfVtSw!IQ4)acANl4b~5^qu-`G1JB z0`;#|MsmyRr+;G9Td7|@c2&bk<>cndv(G#ZM>YwgAhD`C`>?{<=5j5;6J4V}hjSER z32ILtbq9Tkc4?eXCcM_(I5Rg-42f{nB9|L=b%_Png9SF@ZDg&oj2h(ba6fKfil<%5 zQ|FBN8@c!?K~2VU%rtwaUCG+j@hQ#g-rqYU4_DtXk?5pmB#kZwzYSy){AhRR%{G}v z&#&f65?Mh;=0ebkv6iK$_n^2TrxA%+Ix-^~81r@YZz>JFs2fh2o_E1~;|?|peNE}- z)-@*Ar$1wLzi2=~yYkGp2_cKwNV=#py-sp`<~dA0`fk&QS`t^oFW}pndO1eJ|7fgn z{Ic{%ZRU@nmBj{k;>|88+{-)GnQ!?2`7Tn(8ZiD~=HYP~fHn4hx>P}+=~2nhma{qfvMnuoheX~hDqSxY>x)-8KV)66@HH&*~>*&fS~jD zz|Njhgnh#C^@`Sy&PuIazk}@dcAcotlKd03Q@c=c{?L2&7%yQ&Qz?ky^?m1DE(haS zL~PO-qSq<1MX!e~tmQwVJ_xv}Hsd5g5(~q@VqXq$G`!g|;%_H*y zMyDhdhjG!MwLBO~GX$U726953tT!yKhD)Eb?JlbF=Px8Ew57e~;R<5csOXIEtQ-`Y zr^2Y9CHldSh5-aueAe;twWXVjo2&kq5BmYvRs@%=%)^Z5d3XS)h+V~WV7E$fuGl)? 
zt!c;Tis4JEREN`eFsLa|`0*E++ZyUm2odtszG73pd>mDcF&~K?i4Y2XuoQv}77;a8 z9Y+@Ipcz%u2m$F~9hR;$NDZ{6F|S&DwQ-ZUU#na2RIJe&*>_(yFLMXlbe`jX$~jg- zPpf&Rv!HuCsE#Dt!arP407B?8Vv5%OkEnNlmdV$GVHCNSXbcW-b1$&FykD@igm@e+ zdM0TcxR*|V_F5LNbM1;1Upo?q*zwsZcxSn9z$onu%T_cdBlZI4_Dq?OPQk3SXwodq znPDE1@2c3ROp`9bGI+lp?c?4p8!#qsZ5T>fGQX4Q`u>ffgIq&km?r@MfF z{pz#OzbP%0UbxW)SvMlNe_Nohmj_ytWdKIiWLV_}<;99;K~JLqeM%n-OuZ@b1DfIG zvfu_KxdD;nfVP^pV3|F~6*y|sQ22hDA&%|!cML=Cx zZSOzqR{K?rB!KQG^AB#*@2mTWp)TB+M1P3D;iLbi%H)#XpI z5B$Rjoxp(+!GB81q_N{%~5sQ2X!izuqFMPh&4~D_o@xZpLCvnO?-?qD&am>!*oit_{#1SWsc6(g*kmyk z9q{_V^a{18)eO5UmK9HraZ5gpj{ZkUTuX1E3=0=>%s8)1k`>Nd@cgA3)hB)D+@F1d zOGkX}n+W4?7V#Z38FJHmO&_oH{>k5;te11^9&%?$CyO;}Z6ML6<~Dwce0sqn_W^cS zg()H*P^R{4>DKCcx4Z8(B=1dMBNzsYRX&XwpyI+8`XadwoI|y(p7LFZu-_urxJ4m4 zu$h%BIi|NkGAqPkXB8?Dxsw;Dqc#3R<0ybgg-jlJFg8CC>;ZV(9Ar+v zLVRg$DQ>a69t9gt0zF*rC=Wu|0|~+nz*;^)32nHa9Esg1dXAoKctpvn^Mq`+vJ2F; zDp7Nn*8mlv=!P{3BCq>Dq^*!*ov`jq?CO6+O4R=m5%5$5KDbK{p2;n3Dmsf!I=^ia zCT459Y_ixYt7eW4wP&H$R^8*-SDx&5Hvp6Um9Ia(7BCTTng{7ujRP;)e?;NeWP!)-p6Nc%-An(K zTQ$Xs-#cM&mEfx;iXWd7MtNXx_;48TxyL|SofyTmG4~y3Qc?3Ne(&0`)In~TkD^*t z2iH&J`mMMR_6&mhI`SV;j|^;a2cthN|B6FxZC6pgt%kovJhi31xs9UQ9~~I%|Hc>b z=PVKj$c%%+pxG$(>=sn2hQik z=+MbtMY|yGyf=zYp8~hl)8M&TwQ~!B+RXYpVYr7A3{|_ zK)xo4)mx8a^i1ixp4m!H^5YmJ(bbjq>rs@OxdD4HQyQnnQ(}LL`ppd(ZUgC$pdCQx zstmI`q4!LAgO92MW_IPtls}{X;^IMyzw}J`(X-hL&vg(q?tQG$0R6BX^5>f(WBY%m zX2t^c=pXd_plUBoz8^hVJqy1>Xf2F0cd7yVgiWK3I2`lTh9!U?7(lHafl;lU3%(%g zdhlcQwPoE&{C0}Ul#sxY?oma5znXQdXXblh##`E46r-G19ohJB_*wx3rMV12zVg;P zQZPrIY=FCY>ttepAHPA?bAQ4Gnxo?6B-R7_{PwIaKq#?#;M6d)$dS6#vylfdE z?P=}pB``nj3vy%2>+9LaM`>!Nq&{=#{9;%a3=SM@Oun>8(83J{GTuSA3nIrcy1#Pv z^y!ms?tb9J30#VX=p;7Ss#-H`lqK z(N2$Ga58Tn7qb*K=__4;^cDUh7tBrI(zJ0UDQ9xjE)UuKRAzP0+4dsia-vUg(BGJ{ zVrdxBBqa{njzEr9As=7PCjx^<(c>O_VA9aGfTb6}c`&xS&L3IrEf=3Dgbt?PAKrTa57Ya`YF81AV9SgT zN;}cJXjv18gRKB2s{Xtu*0I|UxdA@Y|B+Hej zH}8hl90#2_WfUdjK39eS__QiWUG|aBjWO;UJ`Qd-EJxhqEf|cHm~w}NefuCgrI8*z zxfkO_2@o%ypVDh~_fCXZlJRWsiMdo8=1ETZur0I19ZB8Do8laQVJsWa1@S_?>QGp{ z+WJh3V)2XH!V>ATS1a_xW>*NsbWHQ6i0_1Z*Ukz&-U--B8l1xAsesY8#VK0fe8I_~ zp2Z{C$bH3H&}31*ChG@2^I}A^>3k+559%hDQdfkYlPA5G3GY50J->P-_-KMERrz<=pwd+f`0 zBk4cNR(En3zlIn31df_ZugEUj^{z@citl#o<88vI}rMSo3g=bp^>5M;=M5Kdm zt;CE!(Qh(coy~Q&Jy$}nV*X(7-iUlo_J4)Xm4)PC`8B319+ciBoD;?WZMt7=I#NkJeUg5`Lzkeg)%Oer^}mmA zgjorN%S!-&z8<05zUnAU^#&EK0nG06fg=*uMnGy zbm|7*#exj6!5iSmcDmkFp&ND=P)uuhm-Q3;c9XjWCvnH2-svzJZkUL%0hYr6P7V(M zZ_L5pO^L#K7eGCevMlA$gCaFA>!(%IHb%CsH|GBlsYu4FeV)g(K3EC@QNzvoD;F6- z7GsMRt4ejKVNX&cf!Ej-15@*2l_GLS!cdDvaP@D|O||XYZ@x;EWeF78v_{P@v+ahU z4v=I$R52^JYOtB8M3ZVXD70RIy>7a0PuIt8R^oHs>85e$!v2uUco2VS;I}vC7W#!m z*RiYlFvitd7>-sHz-$Bo`me4h08kYk0w;f^PW8xF6PqSpYxhT?bX1^jQ8LA-C8Xkn z%QoVhR|4O2J4;B%@w#R(#`e%1n2un4F;3vd&C?CThKD0jthEwx0^hUP&_H*=8~)+v z72%22^%Id^<0+s2`d&H4YW~<=46W+xn+I`tu_<0V#2Qs{Z#Fpb=_%H7a!E`b0N5&5 z^iabirTlp+K?Y%jW+yKLNn$M6zi!Vd$st;c&YE%`^4mh=(CXA#^7@tmy<1&Q3X{f5 zCdqzpDpW;pYub}D>AI1JW?h~y3WE>gzU(+vsNiPhvdQ0eng1w zK}gy_sL@dsYR@q3=*Nn~2%F#CS^v(mII)2JZQ;I$-a^L5E<2h5@fz_?Bc@QPPBAPK zFiK$bM7i}c!8{(U@~nIt zmy$@~JIJs#NO6X^}$$vzO7vy?h_#RDg zmIJnN$o?lxy~Uz6VGPgjZGj7@_8<5M?6ugy|0v!2(pO=YN+q}!m&tUK;C&;p^AY!W z!lAIXp(#P^X}~vDM7!UF^_qj=CHQ4{kIM6Y@(M%aJK+`i5HcD06kK*9PC`o9AW7_4 zHv-j;dk+Y%=*ikWVZCs`h&xO*{qDF(lB_>1hO9U4KHL2jFZ(9u5KH{hT;PWe%U7aq zS!9!KL)z&n)#Xg;Hm#y-*f_rLQ&1ZiI|!u(n=nOpl z$`s@O!Pt}TnpggqY!Y4p!#>yRW`aBd_`2WXl1R2*+bn2M-ojMwG^(Pw8-6=BCf=Sr zEpKzZN1bQ6{{Z$oTUmoA3&fiRFr<`|=0nkcC(NPkdvU*2wxGvGChs zbj<)!RZdaqtRcn7>t61o4C-bC?F|eCA>4Dgu^w~+oQNGP7JKQCWluu#TuB-B+K7g+ zk042g9)a3RY0P(YPNvQ(O9+%$|9L^AU8s)NpzJ+7a8Ewr`OD1{-P5mO)c)(LRAfev 
z#p2Mv!+`LQtqv07ForQU1c&*K6RTTVl?!#urglv2z8>i8GeC{?M3q~bB=%=LZ*H2Y z_v+_rYl|;Pja?JG9Qtf1rSeh)&qy-cd`G7 zTHrgV@=O&ZRd*)G?!N<8_Q~=>)9PXOdsWvlYjOQWz+L|g3ouO-H2?x};158*ICPz6 z^Y827cE=eppm*FQvS@!g_naj2cAMxuNFk)M1g*(vM65t z-sgy*t&oc3?|&1*ozh0Q}p9hl6a`!Xe}Ywh<(V1gn8r@+`17YAqPE91bT8zAR9!j_OnV)zp5k~M3x)E9`spq zN&AQ0=}kT}elJ*YwA|JZovc5zNMc5l;SCFcujOY~c|_raoHiCgQLK#Bu_hcWu0Bxw zyOr7F$^OsfTLyyVn=V{D4%3X3WFk3?T(((!j}kqm%fW*HWPt6^_U64W1#paX`_Dq^ zV103<%ex&E=!>nTES4EtPecCQ%^Cmk4*ghV=O-`Owsnw|vUUz63{uWV(c-VyiKYK_)L`6KWJ(^rgO-PO{Fh8lZdXx> z7&73;#rv`Yf1w8RI9V(&s^+9O>nU0k%K8#(Ntx&**504Otc9?N04NY>dN$xZGn$ z@m+P$h4~Nr!;cR&t+<$Z2}QC{;QaWA50s^p6_EB-pAZ{{(R*gfazo|gd>nipbo|-1 zGXI7i4mXc>IOCG)ikFnB)$<25?OkBFxloWi1sWdRrhw*uJH3hZdz5H6c^PVxm!ozY1(&d*O&hh@pf(eEc)p8?VNTV=ZyotA7tY{$5!3=y-n{%5DUk!jmyh> zraQb@$BCk;)e$$;J<=u0bHXnM%9)jX`(jlBf7bFR5Q$w<^{ryLK}S))eU0&9f^EDg zK;iByGs~ujM&Q`d2!~ud8WSj8qbncRCB?~B7uyNfo343=qORNU-0TOiE+vrFvQdvb zH#Te|n|_z@1eF~IBqO-R+QB+g$l8vC>mepAD}eX=cI!{7|FKT2_T#D#sg`^4ocKnd zHbjoiRY5@_I`}+4f@fskG<>uZul+JxDsX5R4zl!oJb2UFwBTD{T#5{SC;86Kk9UDdE-aN<2s8;kN(83#3DyrVrqmqwzY7m;Q$%LdL0dlTgwwwtzmqV`nr8Vl)+=nW z!r}Yr373k4)3yedi>I>jpFhxQRC^3xA832q@w{e=%Q8>V#kzQ)!J|RX``k|Om#vS3 zuz^5D#oI{Epw}#ML{H}aDGmEMxq6kwt&=GOlWOTd?;Uo?Q|UiS2dlQ;TxGCocFx2O8*itM^^J_!b~px1C<31WF*xW{DrcmSPZ!A6VY!b06D zJbTt&ya2XYiZ+yr$79s8;gVwf_K|0vc%E=f+E0-TSC^Zubs_AF>_hGi_$>!4zgt?oCX={uLJpF;$6v6OC?w zeJ7b#b|q~;WTnrg+;C4@N`K4sOa#^ac&qcA3)g&E981Z&OuaAh8{9812&|cqx?f{7 z*vMVOJ#VqH6c4=znc>Ra^9vh=PCn@Z5#6c$-X=j)_igJa zx1}YCwu&oli-Pu7#SD?q`vei#H_%!G%eKD9vslWWIuyU}NX>!vi_FF!B9|(&A6#;008M*B(Gi7fKhz zq(*weya#q%Vp~2X-%HwLZHH(2KLqY|3g+f=S%r3dU~AE~FGF4wpNLqziw6;M{K4q3 zUK*bb{TD-ZE~4QR5ddQApR1f?WuFK^V$6BQNuxHwgu7y;rSMEv6PK}Yib|^`IPjk?3%ltABM96U59?T|8U@QF6A5>wz5Sr)wHw*r%LqK4N@apf_Gk-r57T0f(=w`A_+J8N*Jxsr zl#n-51?E2@vFhtFqHR8EjQguyT-sS2MrJ`e6*FNrAp7ZGK^AjH_L$8+zQlf+M;H72d!R5=!6}o;F$T#i)gAenK1J!A1HJ_R1CW8I zn(I89voG)XM!k~iM@h-b2ArrPy0=;cm*WgRfbe?W-slqGa9G!m@00~>5#y+!FI8;R ztL!Or3-~Vu8T-wjPhIXf77p3GD*3P*cORd7#7P)wY<^Fw%jb70isxS!W%4WCIY_@z zM*_ux@>R81fgU%t(@NLJt}sJv#hY%wt9~lbJDh~X2rp9fOZ_fR0urPu$}CzK7H!>p zmJKAXS{>XvFP&>HVt!_K&YeBVv9n?EF+$A@il`MVA5nVOv;+g^{Ot#j8Cd`Gjg)Q= za<3~`K1`&0bare59{GohBLXQU>uRW&acveLpM*C$x9D#%R^j8f>mocMgl!sR^8rw( zVo>wq1y>EKVJ+|0nDMn?Q2chw311K8ZrvSZFGC3T;Cb z1fx8b)gscHwgvJ`Rc~A=jwu;p_k&;fr&gXnsaMqsF8%nnUQ)0OIQYjFO4lKW6YF(# z=;sMH#=@2J=l}HST*gHQqd7ddL&0Xy(>Y|=Q9argnS?olYNddumeF*XHb>0&vX`A- zTFIAPVQzOk?Lcc3Zrm>x13g-noBi#&xRtDJuw0vP{ge zC;T0bK2kD{HqDgU@ztEt+#I}MMI|o0WXtDsoKH4ld@)b8i!s`)+Mah5Y(L|K)gS+l z$W`cO7Z&457LLV^;moKCA^S7X<|czuSb zAkRLDw2VCaGxk3mP20uytxXqVtC}?{OZhdoF8LWn&Dz$Fd=#W)Et{Brm5toNJxct5jnDJd za3VU828Ug+(P1xNqbowh+M}xke^G_mKdot)w1pbvG(nY5^UNIbL;lz(D>U<8CU39_ z=x{Q-0mNUXj)(Yc(RpDEQ>LCdRUG#T!p6Cxea10;6Ys0dGY<(cN{ znl6Nk_Q&`Xn@6!(xj!&1-S`F0lg)l{F0+qH&kioC+-oZBa3 zU*^}%26sN=1OYU*#L*ofo5fam#8iMi>z?ve1_U)(&qMDoQ~JbFR=1DnoJ3>_MT`sUTuB&}b{aQLAV#3sKNQKE0Pobx37Bdk%ois99G};Xt4it`<7Tj7A zg}mqk0%$09FUK>K2}Fs5)V8DQQUDjei*jeEwDF~3ALjQO!+Oxu6zJDtW)GLX_Nctr z^1y_gWI~`#lMcJu0=WER)><8#?R(On$vpok{J(C6MtRuyPh^j(1OfFJpe#az=;i|K zNrg5v0tK^wWAFD8D8?PX`jM&|B?AUX9NW-xt<4sfuMLMeZ;Lp&GJ8XrZ%8wNr%J=F zL(7bC8qH`RL}%)!>ySsmW3^J0n+Sh-UXPDPh!uZd_R2BlJ&XUI%}(^tJ8t=zB?rZW zwjdEVEV5o#`E;4x`My<%Ei%ga5fv07_LIpbp&$Wj=azUWhOa+PgZ_?Gn_(YuKp)mFzj{%qj@M#+=( zWZt)vcl__;n)anOiUC3lr*-&?P3EVY85Ah#EdD1e*XxM1y0wf`;Yf9@!6ys-*#)ah zM(RaIO0RzWCO_qY? 
zY-djs?Bz6YMoNC@hAXj&Xwt9K+^bluni^R1cF^S_!1JBwYItV~o6T>!|8QRrS^qkF z*1+8n$w;(x7@n^y_A%B-4kfL};^AFzQIQujwYBDqIOmy7>Ycv=ax{1J@tHWK8!f`f zoIeT5x1^;%Q`Rs!n%#1(^7+idi0tjj?kZ9_|N3Ec#flG6)vj+aO}NY%pq> zbyrb&VApwQ$XS7e^$#2liv-aT81_Q-Weqk=gg&HXCv0D@@EFqsXg0h?FwbqI9M#`E z^B;vb5I07&(Qaf4fzfp?N3qwVp0<%kG=EIq#F|u(tHPyY0caE?!ZWJ-aqYd1#Rp{jv4Q?=7 zMTcjy1Dd>}K9_&y;U&S;*cKMOd4@Qz9N61(N7Z2Tn@j|6078;tR#Yj~w?8dsgo+L& z81;c6TeOfZnl#C+#xuaFmMuXOwk(@nRH^){H(4UB=4E%9 z`aB`May0L&+d>zWqTeU{xRPx+#$AwBSx@5Bb9#@M%Q$|Ledb~6;&LaGf}Hhv3lDvs z&3~tvS$4v7;y7pOefUk~fpXr*@8t{s5$yo{B|JGk)n8%>$Ai|=%_i*%j>!v6U#i)$ zV_f>=7Q7gEg8$_I7gDbh)`Sy!h8IV^&aN1NCh&MhyghbjeWWpxJyCm7-^ldQTuBBj z#&-uOmEH9Ss_(0BYlvrruqSS7i!QV~t!4L*-SyR(z%AcK+k6f>?iyg~-(uVwpM$Hf zKM>Nt4L#0fklF=qTQoMta@rqSv}*A%#Yh9ZtK- zsy5yYpBPicg1xJ+F)4o$3`p5)%8S#uusZ!$MwYAzLz555Mu~47i%@Y+>y=~mdJW49 z!`CAjH}qM!pf%z2mqibsj`{@6BV_eTlQunPH_&2o%bAVPdLYIXZ$7qgE0Y1wbEaY^ z3ihcz6Zqj$Su66vJVYQV;zFnHwwB$u7H5{5S@V@;m0#6iG_TPsu0%F_%l)7v-z4OH z7*@GnJ@c97YvSOe*qL_bOf&R@kEWZ8(19y^#7p;_>8zrwRiu0?OJ%4K^TXH5QsCTKW^s@p%+P`p9a!|_R*$r36C2ahl z;?R9rgifAniap&!ivLAE=U6cjEW}ss#un?bfq|or$NE?~>Z?Pil$xlX)(IN;o-7A> zP?M7cW_#m#CE!*8f2E#_c%D}#f*gdxfpp(or<|T*YZ7H{d1_9`Ew<;>FQPZSiQs9K zC$h)F$<%q)^YKrB`+Pr({Ph+`>$D)b5IHBXyuY&Y8!64ye{i=%6$U}*ZGqgEpNrFjdP z3|6F|p$Rh{&nFgLcOKlg>3V0V~agX+=l&7&0XdvaQCrd?4TaT^&Ne!GIXd5H4NZ zm#__MgL!R?wk9NbdJLigWEY!@!lr)enUE zQq6W6pT3WytCF<(&^xwW&4#YC75QlJ*P`cEj21;;b!+@V=jM!52k?2<|0U6jz}JAG$$;iv<3Ek6 z!Izd>%yH-fe4R)v7AZReT zOKdYSYrPe4*0Q05@FcTD{}FuzA}0u*L{nXwtSR87#lNHYF|IYL(c0r#7Nu^{$De1q z_Q~A>7n{0@Ab4Zg#jFsXp)Qy3h8}bQL<^z{y6o!1zeFrkQRG($DdOq^v6=K2arQ_O z=Chdg5edzk!UN+v2>Qs2Z~)`{!2Y$lwp3a0kT&0vF<$YQ?2c*|@##T&*3?K_)L=mJ zj}T5`Su0nSy|Hq|Qs9RQ!WEeKGEOA6SJf3TQ=wvc&&zV7na6 zPB&9(hpzOEyGSlGs+Lg7@{J8-g6d~EUI1O)`z z5Mg4K)?I9ns}%owfQPK8QWOhZZaFi6$3Ot#))R*@T%w=lp51>$xw`PA>iGu2WY9u) zS>jy9uWt)0y4B~Os?O85Ty)Q+mSGq-lt-U~j7Qh91+v?FWY&Ky|BHl>SN`8#->T5W z%O8%iydMLnNa6PgLO8ieUl6igrE8VeSBCD_|FQR7QB8i|wxA*+O{GakK@b5^dNZ-n zL_k5h5D^d}gepx!qIBs+K#D}A2?$8gaAo?_xqhO&KPHmdtc7O zefvLrgSU@lXYaMwnrp5(%b~lU%DyA@Yi5)t zAVs?I4@1fZX@TZj_YcE}FAcgMyd>Lsd!@r+m2eeh3(Maptfe+5poV@b6&YzoV?jI# zxYf>*tUu(mt*2Qpp7aXEE@4k^+M#n1!*q5lahD$-o8I_jF#p+>tk6=V*NPq!Xzanp z_~C=6!AwZo7Yc4S{GW}}_>M$03c2^SKP!@5h#nu_s1oT71>IiGejxj5cph8q#r_

J_*7D-w2(y$MT)Mqbd208 zhdiZdmLKhx7e-h`elJy%{NXD5@CfF!duT({?L|%+ba^b=Ui&+)#p!9G@wx0-PuDx9 zu^V23LIgIDP8&n$E*3CP;+z@o5K+UJ%$PKpHQfKcHhW=mRs1_uJCKiX@DGEe^CIMM z3%&nUX19)P`42-<+6lcCO@0b#|A#>@hjF0tw`pmJkzC2Pm+?g{4YMLml`X8XjsHHl zc!wQsOcqrryT>2k|L}m0z47Fqi zNMaL!5*-KrZLuq~cRpIKbmaJ~MO=x==fA@RXcvH2hdbPgBn}PtLU??QZwMP-nARzF z%v@X=kYDZ!P1Agq)5ei!u>$mmftM6RssQ_;*F}ZT(UHo3xlY>`xkX!Dx|OE^d&P!U zVOCeu@`3;$bo*QI-*C7HAVIv3I=*)e30kN`QnSs0Eo_%A`paUoS-I`gV>@k&+okG0 zB~uV>*Zn%X#$(Hq=r`Yp(IxZ12}JbMM@2=HcH%OrNVZm?xtNUdS1x&(S_)l7zhkFx zXY6c5S_VKZBq-^Kdz$ z-zbvjb^JM&S7BW&+fMDjM2gQH^6X(Fkxu`{pG=Y>APjxkMy>Om+<@bKjd#zu;x+!R zw3^Tokbi&P1EM3)9=p#&6Qd6;_&);z{67pQV&#+HO^=spC78-T`-+!;+<2OgEjL|W zh(HmI44UfFn8t)nFm5lkV;D24x&m?E*E0_RO?sA^CC_=ItVM7YvUXyFKoNR>Y_W#y z@Hk>d)80Hf20!|T0YVREvX92e7BtcbGSr15)D`p0lrieyC>oav3v{g76j2(>@!NQ5 zIpVdYf?Bl(6E}oa0VcBz5Q~A?{>MZ0KOU<8@la*?e|o6W|9?1P|MxqAiIV?%KNWx}LjaT$;MWV&JN{wd zf%BcHP(0e~q$zbING}=7cdQ#mz2=r;nkFRh=g*ALC>+ryJkZi;r{gejOD1WoaykcM26$1hkv+WH>lV^23rT&XX8H*4m*PK(;6yNgUZ^7s;X{MIi5}`8* zb(4*cj%wge)`pR19MA7AL_&y9J;D~a(6}%2a=?lOP&^FKDG*|MxGdH7X0-q_<|a&D zgj)~yCMnPPtdqn?*7o*w6t0sTlIBbj?X9KMPv^p!ATyVqk{&}N;6O>So$fr17(N-U zT>U+la0FdmI?S;RMD~7Y<+G;$0oOi85g$s8f$?X7cD2>FsH_L7dF(PL5CZ``hQ0UL zcuIHYOcuwfqZ800phSEU%-Rj&C*3!cil`B^)rMtvOV?ybUaa3J{rS0cE3mvBO}So2 z5)Big->!y$WZpVjc?}j!aEbsBvCr=0fq_z`AQjy(kH@0B|c?JKYNQ=Lq+cY zeEGHQiPhdQ00J=kWh2|g|6$k;_yf+~XC(J-0Z9;33>GL{AbbI)oo`WFSdv9s8bWg1 zU1>@g%mq!foQ9I_xut{M8k|v{b-^b*e^h_x30bI@A=}#^6`mAEy!^B#o{TCFNO?f= z=!ydIYqLGM=i%#7G}uJDYFUtt{1(@&kvy2g7n%@;##MqCfSLzQkp`LzOL&W}m~#h6 z&VcyUZUIET{cVpME7MDc)%q_JrO%W|i{TE-mql^zNhdNCP4YvcMdvUZl-vmEf$@>^ z{}7rDUK3}vgLpp(VWkUnZzrCkR(~`qF7^-Mz>mHIk6SNwgDzKNuMoTEb6t7!z4I5V z1CyMSbWJK!THf_Ok$H>m_gS0>U4n2?Z7IE8CyxS_IuY!&e0xff4Aft>5g8+oxY9$K z)1_rMo?73U4uTGa{kU;eEg{j4QnIi=_?weZ41K=Uw{4jN79mRTA*J4IjnA_iQ8W%ZfkM}tt#kW#u2PO z?Rsei(=VbugpI!n@?G5#0)%N&@5j%6AARGPL=%T1iGElrk5d_{d#1? 
za+(mgPWOVu%-^Pn=UY;5k+i!in5HWgggCTnKZOeix@GULPM|G80>C>o=`xgrPV>VY zZesp;%@3cRmrdhZEgG1CU$X;pGdkt7)U0};^XuKXpKrtmpGq|UE;2G15#JxiA5wXj z$V8HG9(yseqP@o0*S6u~#pQF8nxk>oqepFCk^ z+ZKPzNy6)32XYZELwi3Qp;Y0#WXuCSwe&L5=3Lp%!Uum9OI^0-Wei<;&RCxPw2Tu) z>WvX<`^}o-=Sr~#TGqbJjy*R1X$JFu-@gZ%Jv)%wrr`do)7}tY0PXNMa`mU6-T)OF z6FcREmSL~t?ecALnSai(`UhGzu^J3~+?tt)Gujw8rA?TRb=?W8IvbgHh$ZR#8=Il2Vg7ZS53zb!?n@4Q{RJuJD?Z0U+1O?09yCP_V49oZIJsX>tHdyeEDrx^4`|f`NUPki()((A3vRLopS*w_=3l?PnPj+=CpX(ihTaFzb*Ah;TEfWL>$d zMao658*5*FDKHO}88B#G2w2aNS#vS%d={E5JKo53W~{-A_5s)jtY}=J2naazsq|ls zkzL?egTe<(3DExv&G^`)GzJ5D|HxP?B@G`p?E#N=-x1Bd^aP+76xfL&vfUG)Q7wpSDHo}J zpet-@7*&5#&FQf!8&48?5x0L>FXcAfL+wQgCd3YPfL7$!l2YY45x|UsT49dg-}Mv zEL7BW%1?`SZJvJ(QX~>0%DrpXM!K#whC9B9>W(fmW=Kgeg;-!$!PHBl1`Q+Ne}7BH z>X^yjCNlvt`f&hK%wpqv#$v5IfZq3D)`*NT)fiV_S#^bcn|jN{{q$Z2Faq#j`67jr z3}lHR?+^ghYQ7HWF_7Cxei;R}8xyE$aiivn`Z06OEAlDif$F-tdIKAX($jUQ%Bwfh5$8QP*5_{!cVajr4m1tW)duoY z@Ow^gtJaTuA|A&`CH~~ETDkJeNqmu$4>%1Xzzz^-qI`hM|5(rDi&8Fy^!kr(W}1If zI)OS)PX+(T`yYOFsvjZTyvz`aR2v$QTRX~!TSxYk$G(lo!Y#16 zBx#>H9Of9;PScLQEFRAdm4IIJ)x&?_|NQHGUMBrc^8pfh8wVnE<1KozZ*pV8A>br~0Q zdMrV{lY{TE%+d5`bRlFr7>xUgVuw59%j09d7%UD1%RDCqw?sy0=`U{bQy;h%1SE8l zj~)4}ThUCw46Aa>?6p6)G6TMLlvJFw3KLQEbEUw|z)I|mR{G$(z&IrpT@Z=a@Cf65AW1VCd(7ju#Wx1XZ?*BGW#B z;Qw>>GjS1PNi@r;bGR~bl{lYOJhh=hcWbEf5}q4V{UAJaCEcrU;qn(wg3)S+9ZxgX zo}|5!0liu1mVc!wzyZ2sIwq0t{~h!Wdk^w2X;~S;M75@6qV0zxk*Dcu8+_IYADY31 zN_>Vvh{kMUiJ?T|%$<$y^TT8pt1BGhr_V9SGB_M6GjKoE{=Xh1+XB(bn2qvwk9n z{v~FyuL^wQr+>bZfRSz8o+#dP6C>5@tVS7(Sw^+&Qg%jvc)a2jn`)8ZQmTxss%nme z)#xIRTJnS*#bpToCs_=a+GjS?*9O=w=GwcqKZy>S5=u)>#ek)>d{1Q6My_R4}^<=CvnEVdX=_d@KF|^TeI%O{?5+tUSw}zzWwl- z=Qbmxr4U1XusC-gW?_}1UyxcQJDRLd?xR1*hu4C^*&7;`a>y1sWX1cXS7Mk+5Wsg; z>}%zLOHi&6VbN46;;cbX@^HzwP5GrbQjwan6t3 zx#uF4zn8O*sY3=9vD62&FLWXJ%|N9VSy09I?!A6X&N|lJn=yNSy(-))ydia$T*QQ2*julsoHj&vy+*+Kix>+b|WDm(S_~wuO*5tQf zAlD@Fw#W{#W!8+wAv4x1;Wp#G9zwmigmay2>6dQBDpkqqjn=WAuMRt#lyEPRnIS3j zim|`vUe!ufIoG8}80#v2*Gfhj#$;ZV>qskuL$n9gsGI zUya`0=7#dVS~TVUjTwwo`_v4>dd|h$BxPIdI}st|O^WD=36+z0kkCEADrdcTJgQV* zHHNb=u6_94>C!0<7O)x{<6qIjJtNg#xGj^Gp=Hl4kGbciQ?7aWy(S%1AAU}C!_M=9c&j`05CVnDuQ33WVqhUDp~hAQh1;`nV`M|wT77-!pdbDJ+~eVkM*4Fo5@UBF*v6GK!E5|XrjJHw8)vtF}f7|>_-Z~ zo+ba0>LEmS1G1Z^)Mc!QdoKeT% zF07+azaevyLF|nMjc94~MEYOL`Rd-Gm-ivoJzj&ysoiv2<~?b6Yl(;?hmXA)`|+bx zOKvHv;i``opbs->t+%W|$W%V1>DtHt>BF;guN?NmeCp}wWYWy)v)jiYnrOKL_NyJw zo7%!%KIW}Mk~EdyL)N@6CMID1N{DFcTr4jBGkc;FOdT_{}j2LP`eC3UOCeSOiu zHe~)3W%K|i`T#QF1gO+Z=3GEy_RB9=wUkrE$?tsy1HZjE8Gm#`i25ahSq7^{fJgxo ztY?(|r9K6_Zw}0gRmQ1STY6}WzLBA7aS7_@bf9%|&5U)&CVnLA?iu4D@9aS19?q2* z*mOYdOv5;Ga#qu~8xesR6s^Kn{?QsV0-&Ffqww55M#4>rGeYqk?b!IY*!9CsNJ!(^ z&&5L_T!MqbKRYhytln=BJ3Z?#Ln?neN@urrfw1k*#01nG4Z9S#8xg*BY}zxAIWJ5! zy;kcnk`_uzsjQ&oAv&hrG{9TI#MMW2wnZLaXLL$pU);d41zC(A?5=JZXwD(}z?|;c z7aT|AAk4LY2ZMDX7tO2<1)c-=$Zt-74*kDES?NF{6IXORPC!w4xE9$2+(w>4opR!R3OBN&#plhw+{Jp-UxO0TcaSwVj7-Y|Pkts6Tz#IqWB z$o)uX5g&#kp+e8O3cQJ6{y1l6c=9edfP_jPY@EZsBB{?h(l`diABekxrK&Lf6e|nyW z75_S`!{&l)yww%F2qFYClWgGzB!3I~?~%69yH#!-;UR%P(7RLZ;&%|*ch0gLC5VV+ zN_7~j$X{@wtCEy^-rr1jUClai5pOhrOC)RdvmA|Fy3=)2|JUhcCd8HA97!j2KanS? 
[GIT binary patch data omitted]

diff --git a/docs/development/llama-star/idea-arch.pdf b/docs/development/llama-star/idea-arch.pdf
deleted file mode 100644
index 4fa92c71dc4c511378c628113b7817e583053758..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 42334
[GIT binary patch data omitted]
zd}Q2j)563e{?c0fb21m9J!n#$e`x0_w~9c@aaAgmv>i~ZM|E3LSEZ}gbiTbDLh(%Cy>#VQ!+ zwG7q5qx*&={jW4FBU-Y^8?!3|Nm;j;^m>Bk2941)NJrj)_{8a^aD?p`Xnv}?MYI^$ z8cy&crReg||q58dT%!!tF9U3&=NB-hjBI~$m zlVnzFxOWnVBMTivCvu0pPHW?BuSKkOXA57EfNInF{gWIF$R;AAvr zV1nuoTeqSB4>#?N@cOqly%6>Ace9J%NRFqHVUoli7^jdLzRJi;baBJ|;VuHlY69fz z^)rdHWDGJt-9hYWvHq%biUgg%wyX+Q>^Ii=xcHd$2iEF%ob5xrB7b@|ntR?4MGVoby=J|}W0&+a8C!=vb= zdNL$Ew4-v7s+BuwKv&ns&h948+h^uTir|r_}Sw-cy;Y^Va|2n*z_T-tn z>@J-QRMK05kj-_0@xfLEK1SyYtT9mHh7!E__KRwk%zaaSu2OdK~bT@$PK2lP3;e(WLzRiBC0JYR|lf$P23PrJtF?6wAl8!z= zHY}32;UnqtIyT3-DTl~MkP@;Oq4#0H^qMKPRaYxdOs}J zk2cz6HA4P~M%>~;W`R3Vsntas{^_P4P!gy##Z) z{vu(FR!cOTZ$rK!SQ)1Td&W-@;|0%h*NM{U`_Y9ZybD&@bvE!LC?z|`9L$7|@@>i0 zS{bC;nHo{|05*Js>S-fZ&4(5<@fTVVdWUL7%r3CL<2w=}jVitHFrX7~mLK>G$w8Ao zG;|0_PX2oY>L}}FDE-tTh$fZ7y3oxcX)rm{j7H+QcpUX3XZi|X`)>8;;#4%YPrgT} zQ57O9?FS4dfBe{K9-*td1R6o}NXFJjwY!rFtLHp~Jji zrM}f%Cy&~z14CIqcGHq2EKcETSp27>UXDzqb7`XZ9`PV!sLw0Fn?HJ#>Z47Ice zIJRhS%l*w6X8Xn_aRqXZT+#<6^-~;^d9pp9cW%-M7;kP8WyUr>T>!1p>wyh-NIxW0 z(^z{hzG3-E2d-e-Zit#nU-{wV0qsLmrA46)B)oNCfyILj?GLyKxvksQsz`qKqa^AS z=(QxyhvRxTHf&U-1hFFsqFWafA}rqalFQf|%;EvS$pBcRCcVqDppHP;Oboy({zKXDJ#xqEL64XeNr@Ph3xi5*M)_?vZCb<>32qC_7{2<}u&0$E2=W z()S%eC>@mWtw|9d_48Nw%rqxL9=0aX6!w64!IzdWhL3g9mI3Q_{Ng>(lyo*yKV!n) zh4bOz4_;B(%D^&-2bV;SB>%+n#12WNkV|}EZ`*X7$WR_cOf~~jEjx* zu1hKKeTIaqh~?4SXFcj`{qWF8!JKSwWCR|-x7;I!>g2g_8?E+F*CMSzYE$s-;8t0 zG%rK^x$0}?2;SkI*IB3LR}3?jdWLsxP;-(}s5U9(x@C20+;CXB8lp0{J%hJ3SeXEW+UOob2Mn?!BS6A`$b zZI-YGu5`l+X7Nn3-A_zwGRB@sT}f6ulFU!qq-U)|WidT{5c0XcS-joIkHsQNd}|$< z;_XW=VMfXc&G(C4raW=4-pd)qz86-D9sc}UOZc(a2Rv4Tm#a_va#RtA^V7LYzn8!u zbSX(e3&m3DbHXAHEZ06XNal;=GS*~R`SgRzGRnRbRZ5(C-IbX`-wj6Y1A`0H<>>lS zA&uBZAwh^ZS=7;j;gQ2T8V*!xyk@BM3#}tV29EO?84xStM#uKYPvS*=Tpycvo@NBh z8Ao|U1Q$4T9SW9=yjxY~^!|yk^Hf?1AZ-=!bq${-Pi*%B2Gf9|Lh;8BBM>|P+ku+&6-n`YR6_!EnS6fed%PZnuDt)wdsMks^SWUHQNM=UA(CoAO z$)m51d6Em&)@H**#o=TyLm1XolT5DPVS~_E)Ad5xYFLCR-)HK}S*CLtsqDjfMz8NX zc2}2MgIoLIU7PC{a1S!Q`Ed?AJL6?LC_7nbSA|VZeO$0?vX{sUhv&U5QF9)Xri;*G z6t@CSi5kW?B<8M#@euy-j{My`E>y;!Z$;5tSN2V($9fiu%cRs0c=9 zB~z1hbrmRa{*o*@Eo|?9=4rWqOTzxYR678#s~N!M4gW27xX$AKzm+@OeDx3Y4lJss zZq9$JdSDTAbCytXzOHS!u85Fu0-^x_TN1${4lcmBIj*Y81^|~i+?b`R3IYHj6&bh6 zCT_m>A6p>p{{P@C#)be^W{%&gEP%h4SwK!#advbuc2;#XHC48=a|W=;0Dzn~*Zlu# zGj0ytz6j)7Vm79>02UQ+g@&E6i|ut)35z1&<{IFhlmXX(0IzS0GPr8w<`=kh-y5LCR0qApO7wF@X(|X=j5_*dP=TgaW#u06;es6cB_0f>1yZ3OL<< zeLWC_0)kLL5DGhl!VaOZLu_Hcp`frsDC`glJA}dxp|C?J><|hEgu(%#a6l*=5L-BI zC@35d3I~M30ikd}C>#(9CxpTYp>RSdoDd2p#1_sQ3JNEL!U>^pLMWUN3KxXJ1)*?3 zC|nQ<7lgtEv4!h~g2Dx%a6u?s5DGVh!VRHtLnz!33O9tp4WV#DY~jA4pm0Md+z<*6 zgu(-%@IWX$5DE{3!ULi3Kqx#ATX=3LC_E4fkQIUfvO+jORtN~l3LybmAt)d#gau@U zz^;q1Ze+&_!2wx+@qRfy5crGt%e#QTJPQc?#rx%rz+avK{3U51M9wI{UwQ`q5;PE^ zXt1;XvhSCsfxkoz{H1E(FIfYB=~@@%ch|hO^-d=fw={Qgye{DSkB71-ngbv$x%OJX z@389z{{xKeci2rU{{aTN1-mx*|9==nUEpT2--2ED_dmcmZo#fa^$#%4Td->--3e3t zKW^lITgY{bbFJ((<}_=Xcs) zqy3-Fx+VFWNzt9{yZcUp$GzXox|v)33+DH>(*Eq&;PLl&yZ$lIgGce-Y5)63 z51t164*F}T|MOkErTLo)>zzIIXV5LV-%NS$1l=tMa0LCmegCL=a0LCG_E*jS#kO0* zznRD1+0s9D890poX4cJ?r z)Bbgqf6EPSo{0a&wp(s+^I&}^?au=VIJExW%D;yGzx3HH7r1%EztgV2&~CZFO{U;Z z+8wRDCI6db!=0qN?E=Tv-}~&ZKJZ6p0>{?hLH{U!aBTgJc9T50vyp!`>z49w(l2*{ z{?c2wlz)@Oxsvu=t0CJ%Hc=r3m768=p(>0fBKgnyHjx|8;2se%(}zu(2bKH&bP zmA8a{lOMa&u0M7@IGX-$*8g7f;GE#^pucMV&lcU1`%RYb&ZgbnQQ#Q*n^`x>!hfON z()>-%@n2}SgnyH|yp#6lF1w}qo8;%6pucq4EzRHLQ17Ju*{oZVzsbVh3Hpm!w4%%IE?D*U@!367(`(QdNScee76oez$qzk~id&G|Fvmga9V zHZpr;7$9^a1FCBGD^Eavbf1%yd{7uIHUud_4f3t9KC+*LDe@pW>OAmK~?(Q;h zEd5;)f7Se7Xt#8Kvk>wxw%yV_WIg3yaJSSCS(f=1+$|q~tnT~^?v^J&7K!e}{ZSy` 
zp!(Y_g)B4ON&3q@z2ydw)vABN-SPv-qSwFRZg~S_?d%_L;NW_z@BYyrz)Og?aDN>L z{ye0BgY569Tg&*ruZ912Rr~I{3J$fmaDN>O{(`&h3^z-Ycix6O8hYCiZdNz%ME%9G z+um@qNO~vkkA4LXwYR$LuNlw37lWP-Zu`Scy~3TmJ8b(uR(t=)(((1KG){ls zXXd&@%jIUj8WtH#6Q}EulU|v1yY~EqHgNwWNG&l zzyf4u2VdFH(OT-MncYp%)%6?jpZJ1{qD-&REdNJ&)UQgA-)f`&lLOQSKr8~VXlQDI zExg_&2mI^5eN@m%x8h5yqn8V&r_YjgKnWHpqB&{pZM#4~4J)6J->3 zv^2C)dun;TCy^YuF6;Wk$EHrkj+XY$c8(~(UnNv5s&>Cl0vDA*YHYx}8aX;Si&_{u z0>CR8EV72b-hh)cEKe*=oGq?+r0%W;V0{ICAm@v1#QGm|2;z*kf@qD}nQkYjsj8>tB5bX3s9fL1%}HE36oA z`kGi&JZ0Z(1;5oEG5yGF>nO*IZv38JhpT!kSX0}xY>ml$Z~vL*=o#xtJY!#f?GHpz z<}F7jSA{QQc`Q~J@0~3PJLBsvj-OX2F1cj*6gUw*S*t2`)ZI+kEzM|}SX*`^_V8IR z#N67+P`dK?QU9suEv2~4+=nrF-H-h)uM$1@@=J~y8T9qvFhzdx$=9flwm6S2>v+u& zczU5>YO0jAa9NgwfHNdcq_pR4J^m0y^+WNlqR^uaQuyi^fh^x?k!U(MeK$30oa8Ay zE5$tlI5lk?#aG3mesA9uBf}RHTSmj2`yvd84AmyYsZ~T$QNIEFl$TZ)R@*|TL{SAH zR-$kg`TCXkZ|1%%@h^HA(t%$59=#HwuWsfI_$+g36cgM+wH_j-Z*%X3Ck&u@gm~}E zSD>p=F2bSG8&IVQCD7fIcT=F0=5ASBU#D1v+hP!*O3SK>q$VL#PFK5r>Fr9DwP0}E z2mUbKmN+0BFGf%-!QS5`CCu~e6lODQlio}3^5i4atcR!F3BY+c80aO?XF@4#wP^pgPk1NI7H*wEl0^M zd4+uw@um@q<>&)ZazX%NOb@m4d}Y}fD8x!k6``~HpD3o_1jne-177Ii(3ZJ9uy4dz z=x@+jV`RU)I+04=-8#LRTl36d*_gKe=5PJmFd%O}CtytijG<|jYIzvnB~Q6o zFsPnI|Diu)BQ)QMDS%*Yj@S0bvc^YyAbD^5{)aCP)=XdUk1M0NqKId(Wa6EgJ0iP7 z@-0~wN;-Q$Yn>y0nSP=l?>z3$>ompEUaD3_cFDFQhV8yM zbF6TAVk=>rBU%S9RZ929+xH!&bG?jY&dvwBt34WCX1-8-z!!fHX$#&5LZMLtEMiEp zdQ`&GBvQ_Z{yWt?ZVl@A-@DAE2F6_H#8|(TQMrDpFFM5QUs^dVOYd6!lJH|D zgJv}n;uT(kUyf&AU|-mUYI>c_H0n&b1;vsbkpagJUpOTfFY(^ev4=Tt+V>Q;31i4Y znxn^X74yCItYI#Xz0lGGw~$NI>pB`>|7FHAIpxp0BHp2$q28B7Nu7j=KiU|bB~>cn z7CnwbC6*$N=#aAfL2;|0i;q2gVL`@ZHG zj>legUjc>GZnN6OiV8uI#4g&Scv!?`(1FyuiNS*&jiUI)U3kARsz~vlvgz4)8j3>D zc%NLMavz=PY`d?SAK+u>B7wxy4pk4A{B^o)wC*CLNO7z~cPoH3~AhaOuhd|S-Sd>?@;^3|wQJWK~T&9&`X;yRx z6(P#PT1YC+5j*6;&Ek5v!_p^_(byg^AD{|Rz#jyKP!bwZ+My@G2{Z6eeF)>}(Oz=% zA}J4FQg?^1i9mVNis1_7Pg@?Qv6B<{4m+HG=qjrouewgu8|H<#h#sszdKqiCb+<(` z6%;Ko_Q=4}!Dh%5iZDmFp=ymnz`^JNN`mjqn7mLqZfu-y)?`=+t;*3`wg`d81&o2! z#0<*I=1T6>NGp*CG-oV;*~>!3uQ(~8wkcCM67+WOujxjj-krU zqNv265L<9Bo=+}Az8{Xhl60>IWwiOMYsE6}0loOH2>1OW(`MqaH5Jw@YwS0$vM{s$ z1^0lIiO_qn=P-{4gerBPJ|-4+Hy|)BA)ttkv5S%LM}=MXbL6PWij^>U8OH~83>P7! 
zPub1sE2}5*oz$@jan-`MJ&^4MHd!R?ar2?CYZ0se`tsX@PX`Yd)-29wYVrEMJd6%n zkEVeZ;t*5R>1y<`>WS%EXR-)k7eS~7n9(My> zd>$E7v9h!WjH(1DCb`OZBO+BjQ?Ww&Eg{<#UHHw8oAjPi8~pUv$Gk#n<)mFFzlm)g6f8Z-%aop2a_;kMrzl0FpJ#KFFPvGVe|w88o3^ zVAPcGhVRbfQdaXt_w7>p%Cr1a;=OUGXP!Oj=SvBfk>$~>p!da-j9%O)o1a1S(s9qIXOzWwOpZlz2X zr8ab=Oeofk`H|=mg?2hGaxePy07+eHFJW=$Ov89xo~e z2J^UDH5bx}q~yXSA`*qfo@^LzP-d6qN9QG}{m4E_3{#M7&DodnNqQDOEM?cRuS8!m zumtBe_69sq%50=Vr_NZL4EhNa7z zKYH%*&I>DBg-2NGo`F)l0Wd^-ZWtP9#&#qhWa`Lu@^RJkig045Lqd}cr%a%Vp>;fz z&KV~@ij9ap=YcM2rQsLZ_ML<#gKk7jgSQ9?u;=!5Ax$I=rTPeyh!T86M~GqpV*@9- z%-Xu-n+dHmO<_yZvu+I%B<3__DwK>RrRjry1w9Cb0X+w2p=fsBk;_k2b40(p!Pgb4 z{;;N+IR^wUp1a9wQr_k^5etF}CCEc>NB13xpNIiuk+Px>!NugOOCe*POfxSv_S@IyL@b!0sE922ubq-$SUv%)slZJ4Q!XO)bc|msukQNBR zBcnQm?t>yyba!~1NdUk#vjkd6Dx+>-5@2Ycb0f#(cjc!qmPA%-3$!Wg}u>8quZmz&FhLrdtRwdXpcR)hW`> zH_~AEgrWEgW6GFSIBO&`;cA$UC1>#`@yx20{i$V;FWOXUpwd8Vyyn8yGs~#nD_aLX zOzsD|^MqZl?fW{G^4$Enf&q3I;sO#6dP(}%JS`bSU`4o^F&8vmjZ%CJXwC~G+IYr> z+1zbliD1F`A`Kx8UNUBZ(82;?6$r~m_V9~14KzWX2TGonjlpB^=1|E28ZX#Mzw(m4 zb;Zh1HO|e~R$5YDo)0^B8G%DHt{#&-fi`fJI*l#z{RCwrjC2Ui2o(%<1=S5`eppzA zy~yl4^pH!UpWcs=;UJH9M5eeaYfGreqglgQEx2gR@SaPdG9vWLyjQHDuPTV1QfTLE z?N#8tXhH`6uUd=Tm1;Q>x!z0}8MYqb7a5$Vv~Gbjg}H#0UT?3L1f^=so1BguW)5Wz z<>`Cq*U${|t@5jbx$v!nT7x3?bMoux{SG?I-pmv^j^7xMIB*qs7sL;Dl0YSU(zR+rxreg`n7scfqw>h@G?F#Vns?~=E&6(n&Lo40fgu4&0>aZwM zYD1p)D~R`VDhB!xYz!+bOi~u5_ywWx3kD8nP((DfUonUW^7^3HjgI$9wEo-;A(4n0 z4*TTqgG07)(QK4w*R`+xgMJ(ZzEue0z}!tTe;O1}iM`lMViy>t+ZJTxbv9-2EHdm3 zWx~dsL7<=73yVu^48#%yArwuQ$L2`S^$ks6hguaUG7X|dQ7=0(Cfw7GG3yNU$d$1t z48NB6ALDw`v-vOD0QIq@q*1G3)mA-bG~MC4DOpuVydz$dq`!a5*nuJ62-;_Fe0Vc@&&n~y{`-TG1TYRN2(g;=+&bt zvF0^`8riw}S#MUHaa(z@EAhueKU8^PH_#i*4YsFHbWgV?@6QdiReMtL@W<#I=6{j8 zzW@@V6`~*wAa5pwl8}7C!~R)nD_1+GD>qYY@bQX&)Oi%{gWyM0j%5ovm$kTSc}|xH z^$4=AR1UE3VWy$(5#tqY?;by)sx(}G&fY2o6%-5`OFB?S3xckLN<+sM6ZR#?b}}-s z4~$0_#ug+FV3m2;NVa$i)mdh&Ci3(1yJiE9LQQG{7hl{eJq8ZXD!r`N(9d@Qp^rrd z8-|=%j>f{fw^7s)>bAGN)e#tQ-?rK(&g%}pz|ARWv-*s>XwWPxbRPg0)0617O|_>9 z^^#izGYgtk?h{X+M@|Al00`O^O5VSoe4sB#G9n4xaIZ2wI*P=EATKH3Fvb!P9$X96 zvk-?GBO3&R315a-hM0lChpwUE{hD@o(CnN#`$*>8of~L;=zY#}Phq&gFqc_Sa&pl< z9jRs01-lfQt^Gv88IK&Yj6m3R;V3jA4PfY8_9;MU!4ac*YW$;)9`!2N% z$*ZL^nx}yu4RAM@+B1~84$wTc64DB$RIJYFX@0^}CJD>5xWdmc=@FYBbeGe@`+7hHN94tOLTf{lWbf%uVV@wf zL+!##9wB;Y6_R*AAj=uBp{q!qdgt(5jzs@K*NZcNOXri0(QzG;c+E#&@b&1=W-L7k zURV_%wuKW6`Po+qU9_Q^WHl_FRW^wzkY%BgOL=<;+A8lGO~c64KNYm~BY8J;W>K*V zzzq_sMIx8D-?qz6g0)dvt;7VHmbANDC) z$GEB_=_?ojU&DZZesJ$%cYW~-q&r`#Vf;p8R`uZ!jl#Z_bb&deWWw2Tk*$L?lSE1i zvm)$q*;1G^^1})X?fYGheEMI*7+%{b40MHT}-BsVN4CIMvABx4mzY=PY!xn z1K*E~2CBLcjL;a2HK*1xZhumq?AJBaRyJVKl?rS+VC^kyRxAjOnUh-!_g?=YS+!km zQvhm(*UOqQw)iMCIBHq(ydgbvcmvy0*_R}1G!!0cD^ng-Tdc2Y{UHH7-p@}KH97(7 zKNFGg1SSl;s0PUj->?pQ@Xa!bOhY{Yc$7wN`M+vif3Wm13WJjM^?E{e1Ur&b+6l5g zziPT@roEUc-^l$8K|Tgl%vu4`qIw3rQ0E{I6T_x zYDGq-J16v!B{AJ(JUGiWA<9CofT@7Tf+0a5L9}>_Xd~KS`x)_ltf;RKG%qv}-&7dq z!ymw5<_tlEiv{2|R1bS(J;i#!9E#@~SSJY-85l-Dl+hqy3|+m}Fuj)C_36+LG?5k( zS8fg(;@yz(z)?5)12HKQ@*;E~@%AI)&ClI3B47K;l+Wn8+0|uw58?Ktj~$l?);DY{NP<_J3*4(QScM)0gd0E>|Sk` zAIU5~pOL*}fkniG!$iQ`7eze!`N9`=hyjyn^dbo=k%MtFYDhH-XkcyuqXyrIxT)BY z01Jm9Uq@Rmv5*lM=R=M#1Xn}MN`|Fle;LvYQ(Z~$<%1CEu5IFv@PIk1AVm%i71z)< zwQc*OXH+MS$>AiOBEe+^qACjuXBZ=GvGLZueH^aGuRflAN)b>=98AkFJTOWAgo}Ix zlGUm=Swwo^jI1kNIYwQ{9`}+2N!?jgod5bRVZn4CiSwo=uiNwbH;Z;+_&HL5|BO9E zoM|lV$8#&_a;Aj@__23FWwK*Ty(;_tW1Zj0FuY@ahM<_QH^p8moR^!+32P@2$T8`D zEM)IV`sj?A{7^j?*kUrafi51I!{I2!DS}BKJ^6E8p&C8c?!5t|vbW`_(@P-w?8nC&!HN`~2=+CpLE|bj;@VAKs z{%j%N85-uOqxZ7MaTK+2cvNjKsWm&`dymnMM0Zp^zGKPkR8JBO8NJNT+hFXe_<7*^ 
z*O+hTM1KC0;{kl(GY?JNK0Ot`s1+T7%@vPCNkAhFW8dKF&BU`Jh{%z%%oPcwsHCvP z*e`Tc9l&Pn8`ZYJjYUNO_zgc4W z`z2`jfus3AYG6AP2fkG7_p0c^Jdq!Z{TeO3sSO(PsFe2J28?A9CT8%vGvN=#A`qMp z0H_5R`*hp;a6f)3;m_cqhe3y6cbFB+E8E60#?Ddaq~7b5PFxM#3mRlb z&++CS#?6BaED<{+)FLcVNID68G7Y!21vUP+=vC zzX{EY3z4+;xu=8G1qnEbj=r?d(dNLHn^m9XvGYBLJe zq$J5D(EYxXRxOnz!Gjnv<2S60&1(F4Bf8aIMam+&39%+YS6^F05>&^N>B_y$h6q8Y z2b_&{#c$IE)i?}od~n|#+K&e)+E0B}tY}P^cz!v|FC30_A4Q)AMTUlKY&u_Sy)tMR zn2JX}hM(@2910y(pL`-qVTODj(sFhxZJo!F(?+UxzbmW}C_YSr z5>A*4_fv7{UQb8AnGP?JFidPV-jm>nsrFTt45ZeuM)Tc-0crEcPxOldunKk0&nd|B z1fO35sh`6&8Ie`zDr@y)%Nft+z_y517v0l;Nce`PxR4KLg?0UNh!J`yTJwqy4qPby zqxoztE$4i1A!9#tKZ_jS>Dq3^&?so3VUh15Ws1j?nlHUSvo&OR zS?ue`3q-3AcDR3b0eK@pU zHz3J7Z(BotZ8B-~-aT6!9*Q5vP*RDFG|1b51DQk8tt%@(x#%4b8J}Tp`KX*8^>_*Y|7+|lz@qxveyyUQfPgeeN;ktWL#K2~r*wCB zNK1EjH%NnYOE=Puv~)JkCY_w4N#jy~b-Zj5XP{?pU0k+oVpxTi+TtTz8Lw3sR~YW9BLF!#Tyzw58b5yXr|C&oaLl4# z6XA3w{UCSw($tiLc02f72@EH(|5G@R+?my=;Naef#?qRq3YQlO3_oKi16@pJVA#qD!*82 zgWR6KA3dfYf%UZ5o-AUp!%;yykRp#r+<-M^8e2D*o*vfBK6hg}_ra>T`7#g4n{XmN zhgFdVM;LGLf$aA?)XRB)!`i$u@O+QcWv)kY$Ln4j$ysRQ_7bUm4T!N!wz8{XjIdOP zX2b1Cn;1U%hIi2~A=*jQ5w&f;B)N~X^2NBrpL}E-UW=FAgpHhPlIEl%UV9^aMFg*= zrdMvZ(ntF8H#|IOaQa6gm#P_h;(F7*=vuwNX}?)lu1`fIx0SJ{ZHAHBB3+}f)HE_8 zsRyPnjQw%m;<0`y+I49ekwdjX)|Q(tiG4?X62Tr{xS~O9aA%mF3>Yzy4J8SFW6?&R zp-jBoue92zkS1u6hqEE)At2V1veyM1Ja7vcMcz*493;%|-3hP?z3idsO3Oj!_@QAi z;-loLg``xa)RyX}mqcFjW&-0xJyhAWZtV{(Q_?~pxK2#>hX&Z6wxj*RSQU$K%`ah6 z3ij04j_^T%y{BhlCWwkuKjRY&>Bl3>VAUG6d9PQ0xRSZAqUPL{pz2OpQe$Tw!liAh&EW9xh$oKb$vU z&27Tk&^7EtA<_SO+ZOcIJugy?NXc9FcvgqoGmhoiWWzgVu5jTmQV*Y{M^`<^{ceqy z(x>jKnnF3fIZy_iUdoPP3@7YF_-kh@!(93AyhA0;9e*1&8t~9Kk{VG|m;+m^&1fJW z?z{|x`<8!rGkuOHanUnedeVq@G0s(9!3Rp*rM<4oL?*?OGo%9ug1qGyQ(xCuDqT_S`?z+{~T|B@&J%o z_=9WUp}XaP8e9H>`-j{cp?dtZ73z{=y86`5BJt6OMqI9|3;o-0892liqlTys8LD~f z0xn(;UsTLw49@_I!f_pUAJ)xVB0b^nYU3qhZpLFbxQ4jd3C_!i6YbC-KSR`BYV~RX z<;Zq~GMu0#cBXYSieaUGG!aEp5#rub6G6ofyxX!gd!62XxSFn+Lb9R7Vt4fxyPMJG z+1{N(WdPzjzL6!39)s^*#g&~098KmcH^NYLZR-U={lRSy#kT8@ri7$E%AjO~KepK`DG!IMXV81g+h4N2uninQnKnbf)C> zrZ=ZOetIDlFZBZ=t&KPWVm@4I)c6g&=<^<+)1d`r%oB;6aE!XCy(9E-XH0AjLXh<3 z?irX#O_*mXU`Lnk>D{W0^z?52jjd-y{L2d2WF+I@R@6mal#F52@P3lMzU1=Z()Sm( zVkWjGS_uqIUuRQZdn_v^=d#F+7-1(r@hEC?CYf&aA;VlqxFx8k8bjU_mCV0FzVbb1 z5<*%j%pyToXtJo@SISa#68TpD66)Q(&7QcfW5a z1?P(kVQbr)v8ND?dIPQ)w)I&x7CJrN!UlX+a2H(W_8N~`TWSu1cg5Ve++G@5t`XO; zkaCD)nAqzRkrublCv8j-MQWrQGXW%{hQKyU6&vXV0&jJ`eCIFgPbvz`+~~g^eWvM| z-i?N9HIzvFhd65c;<_X)>sDrQJ(s_dN5IVIqXX-E(QkJ0BI^%_AAb5e)ouziP$OHr z7V*o1@7;WQz`iH`Q%>%2mkU^F;rvyZG5xsZKk;>DuNvXQk64UK@`?3ug4{Si*_Oda zW`bVKakNqjl2e_B@w1kY$9QnRA>2+!v|*rhx$0mH{me?mhb(+r#+BHl!k?ppeDyTRY( zu-^7*MM5L8IfAn;zAg6!F3uhw*Xvz~NvoHB5{}r#g6NhyEu1<{Rg3r)J&lNpkeg0( zk{L>EB#*vsjOWs&jbb{rGdS;J=o7st`@P61UhKC0HdH}!pMo{c)62YwR%0rI2@dIl zbivNAzzm%Go+VF>>t=Df%;&8x?f6GKbVN|rsWRw6v{PgrsC#I z$$=_bpUJ$dlorak2n8vGwj_`U9iY7Ab5FZlwr^hgRH-Si{=HfZ(b^0~h=J0lRj;0p zF+}+VO~*k#wzNJatGb|GeLu_@Hr~9aJSUCDGilPJ=fmIbvxNdhyRlJZE7dj#xJP8i z3Zn2UZ|IgtoE++1>)Ja_imnlM1Vj~8Y`cx)ZGCxUfI); z9i`kR@xETI*hDDmW(&_cJ~1ioRsXG9z|qE%;juIRoO|R;?zwnxts{7G|5Vvc+!2|e z$xce#y6{MZKcAbQWLKpivFw^6$So z?L8)g)eiU(OjbL20z+=&=2UeG%^DGjSLO#RN54 zRBq}d^SMT%)C7l9b@E%S$y(#H=Y?qhBQ3x zhb{m=r!o7(2LHJW;migqgM+;Fl_#npRo3Z`>*tg^EFE;jb6NJm|Q!>yY!>Ik5E^!o%syz*R9}Z8Aq@| zv6K-h*sMQPpgyChP~)$r6^QN%`Y@VLL{i;cg^tW1eNgy|$FUus^(XzL-Hp=lBGJ%FI zH@L*0NKi2F$g8~(h|Kske|7gLT95rCP2fA>t+wWi{^u+&EOZGabjD# zc3fdS3T$IIsh4>zI;!-al?$+Y3f+UgkfD|kuJC=ybTW8OKy_+gSp*qLyg?rY;%0D-bHJ-X&`JF<(s_!2@e7;D-bR--sC!xwFum<+63TYrRz z*8Rn14Yv|OF%2K9yGrXJcWq$`1CwCsR|m|R*Eyxd+nd0|*RG;He?%Kr%F&flDLRn8 
z2h!)3k9&U8yLjQr`(4uI(>1>aeJizRu;!h6DR%4&{V#E*N~Rq4G-qIUAL8Coy%_^M z{ghZO(S+0^FXN;g!Bd;l{5Tpq1~o!)vo1uf!nUZ6BSv_?dLLLnv6vs~0orQZT>d;! zns&4g$|Enzr%Z>0cxA4_$rDpB5{SGLhl7!H&rjyD9}2WGiY1)W(#GEk^`mD`B$#f< z^SPCV$(Bmp0!saV85eXEd>E5=dRe1ds+}0)d7V05={T_R_O}XSvx7_ORWVwIMiXQ$8Snn^zb z{rExjQn`T{cDmeBSUntdf6kAE&rf4x zT{|5Y^Ocyg1$}OwUz3L?!f~^1IdIoA*aK_G&BtJjOLIJ$RZ5rn?$6LZObEH(EydN> zsC@~8pl)Mr#c2+&i@X?)nua-4I!EJ^BU4!$Y;jLx*Zlg{Wxt;j@P1H_jCu!mLKUTFJHC5)L(xcyLT&8ZKO^VW4AL=s1QBV2 zQv2U`{Y|kL8n697@vHWq^oQj8FHDvwpLFZb?rfyXYFQH5szl2xQa`lFty(;aIU+1)eMj}XPrnX%gB?+-&t8)2ry%e6i3i1buNsYe4f9@c z<$dmAp`BC(Szk@$IyXM?ypgD1pir0Z7(Bkc2lTf+0^!_pKg!#O#F!+BRDt^Ri37-_ z4f~>^WvQafEP=A!yVXnx6KWy9){fnf3~f*yMf;|GU*5SCM>PRN&}#Z7e>jlJoS31e zMpuK4g;5Q~WJSjI$RmG9q(OSH@1d>%AU>OJa^#L(b-i0_$Hl_G3+hwJh?l2X!F>+(y&k$b;2wUgyW z13z^gs9ILrJ=qEBTGmPO6gZL=vBjDzuHO)wYg7tjkYi*jEa|i;c{=R4t%L&xro8VTxGjD8CzP6?|E~i8P0=p*Bg$oa-DO8>rU6e zH&Qlh^?_u93thnJ&!Xwz^Y zFj=^_be7bcT}qAox~||_R&>^C*s4!aTUL7C<2PQNdgpbmaSovV{v21RnTO!8?g(i8 z{ULRk<1K2hF}DSt~2tS7DpQm;>k?`NFJ4& zGod;vizr*-s1C~n)rMAyOQ~yV+e<5PA$undzX04zeDBc;YJ-c#?XiZZz0rb3cHVEV zmsRgpHyAJM;BBX@Zqy$8HH~{IvFTVl3s9p)qDRUiPKFd0<;9R) zWN;bPV9ndcF(OakDJN3(|Qq>$2S1?eN1U ztUF`#iQZ*S&+QvD%P_S{vxTdhxG=S&E}$dB+%>CoS)hjpkyszXKp2JSoix{VyJ7M& zp=^WIK3(wPdoE|b0N1@NL5}qAw(Zg1Onn{W!QyM&<@}s{H2Vfsp0`~zX$ zhSGV(q|;-WU=+<16|L)D9VO`+^052Ivf;-WYTky(fBXdpx6UH6k3zpHr_75QkJN|T zNNcL&?>B>cCL1fE5B@5Pt$-Vpnk3gz@fDR)7@^YFKKS8FW}63$Af3LxejJm%dxT!e zYr{wcZ5a>qcX52;$doC6gjHSYU+$`8WH%oR=eBGGv2kE+mRI~4QzNNn_?;nS*+aKL zdGQt3nb5!_Pqh7vn;rh3JYEt-#OGV{_-hIHBA~4(6OI)wV%Xp*Rygp}F7rMB59DSZ zfk>`wh!E)g;Fgs@3VG*;4STfzae()yL)FsR5IVo$_wh_aWHTVKA5Qgm;T?A+WA6|g z-rbgceyw9!Dse6_ELV`l8k^n!Mv!`JRQ(nXamJl~h$iV06y=v_cLn!OK$mePMR%J5 zyeC^MXB0fQQ&U6KoiHbhl5Hn(aP}dJK}10V){Dl6&ijrNfYJt-w@k8Rd$BJ|(xv(m z^jk?!eD;OY3FXfSHfmy-Djt1eS4Iy1+MT&a5(5lUlu(X4j%NaI}i>bX@xI7!k zlZFfXfZ~wjlJU-I)x7vHOoPnsRB_)Pflmb=2YWDDsUkyByEqDAn_68#dQuFX;zgcH z#><)xKXW3vM2$q!nc%eUmI)aHXPoBv_)cosOX9D%W<~)fej*4XsXC1uW$uxlAKquX zr~Nh(5Z<}w_&1+Q`_enUkqOX`L!FtPMNaQuGgIQN97v5DF6SIEfFG5zxjfM%e zjt%5+5kZE0h+Oi?@j@zP*$ek0{R&JB3@+;$O`M#)G5hH@8 zU*D3hiX@DiJ}YE2T%g|Y-iR~`N$)#sQ3umWCKtt{GW|H-k6C|{w2pJJUaMu%As?sP zc;L;Ct~Se&???4aR(Hai_KkY5IRd$F(Vh-5qObU%PeNFT@ked7Q37Tx^VnU~QzuA~JGebU2%D5-A#RZYdFO25{CSX?}vuvSEH?&v%X z&WHy|1N&+l+(?<%f>SPymG?zwt=n2pSxowmyxiHTS zExRqIvCkq9SB2%E6iZJC0}XQHeM>%(75fcSIywg#=_)gG%nZIYdC+X+Cy5W5DUmv^ z6sQ`6NTQ5(-#*7ptVL;XC79^KpOUBM$+)5(mPQguGJ2C9dZBk#c`pB1yDxVPYv5l# ztsMiZMSV0jy!W6S@4iN5Wz=^&-ShitKG$%z^trI_SI!=FapNN6z@M4v9kt*0$I&%* zDuNz;{l&XrgnowSn|{OJ+JM%q^~#50)_`Vi!4VBHcb3>4IXRRS3`U@p61BOs2U#N9 zXLOTh`HpGfLh;Olej0e&o#mhF%_{WZ05ky`w$qecso&MVRK-h(h^=dxf|ursYRE`U z4kim~8u$11IgW3PQbvyF`ddV#0GB{8#Y^DE8(OaCC%AHs_!QyFQo7IA81f`xHuGUN zy-mE+CUjq-7ZI4E6T*wbP}t1`H*ga#D!PmfisV;ocDCrY3Q5-;TE;lkj`{uxFKxI? 
z8@fySGck~Tm>TFoc(<}@;H=p@vK5xm6_N5r@~#ZZ;(1j)2wGuft$Ax+(8ZcHAd_Q2+xv$x4` zqKX(00HT<|GWITfgO;FXD-+`TtkBl)Wh!@I>x=;4QWaaNrhW^uLD?*xYm(Y*^7aym zIr6-|ce>iMrQu0wJXgD%_|4H%vRLf_`8gd;>h<;Wq=(+*2b{|;@MX^Y5{KKy;xB_E zcNBwj{ggvt?_k(GS;e)i>X%m-y*EfgjpVXvrnCSgU#u&NjnL=dUiBZXwB>K`pDQ)|| zJXnqMwW#!Y3v)CpV`#OmJ&)CBF=N+%gfrlI4SRNHbzJWpbY2r%VPIgeSBQS@alPM^ zGTx7D6&ks}o@-ajsWobyRr|T%`EWuku2ws`)XE>xSf-c9qv3R%i`9DVHqz|cHG3=rq4Kd4cS}%>EDNJf%MDnD6TN`LNFAhtaS<>%r$mzM-A6{T_W5Sd`1O~Fpzk}I9WTo?Bf5?}G%$>1UF7rTs1>)7BOU@@B`Xs?Vl1$vMB@24ae|z7o^n zb~q!ok^5FYlnum?Yv~~-+g76snoUs7&TUYIhm$~{{3VM>9-PTGqHN*|!}xPXM;-ER znV7(_ZJQtxLaC+v0gc)im8D{?55HuQIGUQ)^=b8a6uKhWLou~y*XA23$v4be8taw1 z&wIma%x9xoj#@HL5Rmu8$ysPQ9BG!nne~jil4tyUKBm2UEkWiiPv|DKm)q*Psy>iG zqJAbcATW@M`CBAo9o2yBo8lrBJ}FD`eVk;h`n4K;idTnbq==`u0P{PU=@-G?Y(QHe zPK}BiV`s`v!%oPoIyct(R0}TQ`vfpkXoaTiHlabkoLi|>@VuFnX-!A94Ye(L8AZS1 z$KsfxjW2a^t%{vM_}Fm&+sweO8#0K^eHUU7^|E#bMk9HTB<$oi)5 zh_y}pSYxPg6MD_ss=w1s$NxvTfWkdo9LkR+mA1X zlm7ZPkE#}ywNySlKZq=<<9UEH1z@IXE0kS z+x7;5qg@W$si}x?vbWnh6i8;UmC^2<9)@mo*^VF5ty5o2sMH7j+|?~xLvu-$vK-sP zFk6JNF5?I~xG~Mbel0qB^2%nEGQAGJI}o;-ez13}#)hRw&J@nzx@AIqFZEVOC2!fkD)(!&cQtSfzxA_{8*Ao0RxsSkc1>TlQ?!#<`K_e^#y0q) z4WnA&(0mc-eo2bv9i_XWP0p%!uO;v=RLIL#tO@la;DpXZ1$ni;F=zw)79;sJjV@`* zDA;FZuwh_DGVqOc&;dqN0_{ZI%odBrc({6jIgG>}$QGMt2_~IQ^0|yHZ`LBsj4Z>wPEQE6x*4#(?)D!-g6M;*;@vB7~#mH@2r9=qZ_re<>iU>ndbWiiKXV@Vs*_RQ}bW@b+zWY z$jC`ni@OK-d&*8brE1)}SG@ficY5XrE#Iz2GzPkoV5d?x(pHY*@nYz@jZfSMSTOyu zBAe+HFzM&JdF@Uf39NJYm6g;<%Zs3-W~_wIs({O9$s4^y-TI@p>(hkPd~joy-0>0u7xUL0Ak`tW!3pAAT$6AdsKF8O&`gs8;|g zY#SPB+o#j_dW06DqZWS?7IZ2c34SH@$??oGKr^&OdSZF$+ve;qvdph`f@jG_%r4Ax zUZ(LHP0f|5jVBu|+2#1Icc3Oht+kK8*f%yC8GtK0+|c%V^)gPioGksqcD z48RK%K;1^2|7--)o9l{BhkH4v#v|l;zgKo_Gq1SY;yVAUF{zeokcA7?eJ|fg_-v>f z{~;2YmhCdUZs~S!xpn?BYP3I?w=$lW4w#6?Y+P}-p$$_Mr^lsGNBCX>ryKHyP9vjF zD~xKz*PHzLc9pva-!lu=aSG76J2H>E0@s)0w;#-UCs+HYsK}eSL>>dzYFeub^mOU2uz3@Om=rnkKAif{v)-qGLPh7 zvGL=1>}Gevy@xG1_tje2=ih~L7C6eI6m*SFojJcK)Q%sHzm}g0$I)1RS>*VjOpCEX zmM&=QCk)DCR}F~*@L5V#mTQA;LG+YNS)$*-rf=aDrwN?IK}o0Nma_vhY&?bTiEZO_ z3>=DT^TZdto?!p*cL;J=hI-l;LD}wa2JlsxZoogY1i#TY#Ws9S*u2i0pQRfpaImqP ztqfE6NK@0XWa71u5>;uvw^Ft;wT@b7)(-~I)VUzO>DFg0c%vP#?lYITHog;#fe=Ff z%d5RxuW`vtCR}e4Hn>;?%MLzu-&aa%{BvHkwjY7a9rn@pG?scDhLnDps>?Osl$Ee4 zX?k`+D;X~)0+~c3Rara3d>zTH*lWwThJzNnkx821Yjsz%+;2_;xd-FX20f^S@f z?kaw#@5-11R~hV&Pvet;r@FB(vPGn|#mhQh14QShEUJ19%?xmBXAElR%?zRyvp<_L z&xQ&`mld}g8IA2}3G9x-&pwajELxS<9Zi;W&{@*uFwq6oHn6sC&5D^%fv^0{slc_p zBBmjsJq<#`>O$5*#o&&y(CFVcqabj$l)Ch{U@RXJZK~g&%8Oomr+pj4Rm%>YMO1A@ z7;X@{lWHzeTBbHw(J3Kdd7mO=KrR$LYZT3l8%`n$)9cBxbibyMzI)#r90Ax(M$uM> z4?=K>pT;aaqo+nqb5oG);&1n@WZIaX@sFU*$7hzLZF44k?X3B4nft!e{^DPiR*l$3g+#MM32NFq zb>_9v961NU$)LW(QJq{6YOXMu34r4`9SaSyK2t{5gt?uWnQRx)l)13uwKmKU^UtQv zre3#>5SAOOSY2mC0l|tw0CsQ3Wi#4g%ThqL!j`W=fUmr+TUQK`C>>bUJX`g>R5ci1 zGi0b6@z}w8d?LR%$A3)rYf1tT75kzABDEEJw=czKiG5u^Rn*u*o)EKQSBvrnL5km2 zG!FAe%&ZwI4ZrKs5*CAz(3onsd>&U87ixCH?rqoa@)Ij|Jk<&Ndb|%vg@4eC%Ii1Q zOxVy;MU2x!2M~%-T`|CjR9~e$j>B{GsMs#m7}pfY>)b6oJDBwSV&<(CtQH2}n0eJl z3tA_OshfCLAUBIpcjb?3THp9q)P*K6t;Q9;UH4a)zDI7k8~6GTNR6k^rT<@`jR1{^ zzL|r*ow2UAls*K=^Mq&fzwKW{41yLA;*GHdg!3b*tt)G3u5Iz($-jYZAmEJu0^2}Y z7XJ>mfroN%{{961!{7fq``05VuI67~|M%Zuh#m~g3i=Pv1+8cO2M+z;)&KXeKoHZ& z-vFDZB&`1dOY*eu{-KgUdhFk^By13Al4XUBfjieRg=nF{mS zR%^%%PqO9N`{k+Jj3f~9E?(hx9*O2Wwe4ka3r!QA)1ZP4QWdU&)h@2V;g^n=xyA|C zg=jFY0Xz6-DeOi?kw}DxR(QW9oV`bw!oy#DrM7cBL1Kk*{KsJ07NZu?Pgn~(idgjBvY8cA-`hQK)|W=>TCIcE}S7#u0v zyGo_dLYi5MC`}NxdSKvRhumki<%P)6%Gp_lX&JHqLM=W?`%)=Jdn8-0{w9)XVM51+ zs8hr0_dA^5=K4HiZer*czopr~SrjBnSvHW$v@@r2nW@ 
zKXc|&&E-7Idp?Por%(xI8Vw$GXTEEgg9V!W#NrxKb^WF!eC-3P0E{?B+0!PEHc825 za|v9^^Q?q#>~+m~{!Ii`*K?&+*cl`%I;kELh;8ygt1{DK0Y;}0n{qK=sy6tVn)H+) z6J;3vQ>%`KcC`dL$VUVFEC&r1pPWchV)P?FT`Qq9<1D@7oIc0A@ogkobg07p>GgnH ze%j_Cu^fRz@K7eb<0u-aKv_xNR!#EmM9V=~v4rNw^ElyVkyDT=TIS3s8f?lBT4@lbdL~&8Z?#^mYc@Gs zRGbW?e=S|PNnblhU5oi4_S|WLn1G9@3H5@X_T%g1VEGR%P8|bLsi@+4`i@bH4a4c4 zRmaC-X@XeO?;o^?9rBJ%FHGET?=DxPSL~nP*Ac6UG}Q($1fMTN$f4}nTyfG3b{Lva6DFki0Rtr z(cP|33dts?n{n;EYCfJ1SmEBWh9XsuaTP@pwtg4j_0a1JI`TF1lgO#A{P`+za^F5L zD%*WwlbLdmK3wW(QkgL(Nh>Q*ENF7qeb4c3hj^r~Dl}XfU=Lp8;7FCAK%f7i+oH+5OA+3P*BiM@Vqf>s z;8PO%ii|?6t$=S6?4fl?TX2pa$mX$T;6mTuTDWtLv{L-q0(6G{*>y+BQfN8RZRPd)jrjb0*KKeqBg4+m#W*~i?FzqfP{bqIfp4#ZB zcdu2}y6&ExK?^_Shu58Sz27{Q)WklO4A4Uy2d}~5gcsL)!(X0^#X&3_f1)>UzrY{I z%)sBfjg=9>4Wa&}`YQ=>Rxksv6VA%a9U{Cxa*Y92>y<#a^~Z2i*;v%0&ZmH-&HZ&z>u(LYG^7DB zxz?Kklk@Sc#55FZ&Gc;hcegDkc|LldH|6+ zCBMC~nH~`nke`KFor*}tUfagbh3J!j&_6m~Cp#*5hL1M-+7RX;5v91kvxTLdKIA$7 zi6GoZ0jPPGQh=R_kqHQZNIW47L z|Mfux27w?RI|HJBWso+A{`prTi~o}WSfNyYHh6^rY`0tUsuo_E8=9>FdkL0`&tws$+%Pj2~re zP=e^=4CG?7JWAlAf2Y`b3A7xBXKg^>H z2o=da%9x>o(?=NyYASx5K@B61GB8wq^(bS3S{ff^tWY!Y;|ywFew496UD1DMKt`zM z?@{)TvG`HO1hpj2FE*q{GCKW4zwamvgD)o?$m13n$65G3c5zGr5BI**t^|Ka?= zPsb@U3)DLOs9lz)V$vve88vY3}$|MJ%ZdedeYC}r`J^gWGXyugXQVm2eUj~{~(u?Cu<29di&?m zIkP}^7f-T(oE3ln-yqFCc`aF(o?cT}fKY4hqhleH>uL71pIJap_DBF0@YD5*g#}6| zf7Ax+)Afpl6{>@JRLA!8It@AWNk6j!pjP<5kA>htpJu?PYc?A*)Ry_^Sm-^fM;YW2 z^|)Vvkm&I_16i1!j29pq)4z{NI~#2X>emJyy1SM$cF~7!P>C31EiEB*Vd$8IvitvG zo&)}|H$xu@KmgyI2D(hTtZa;I%#1n)dSG2WHbxMDNe_ZD*V8cofLVdu@c(nk|Gsl+ YYX{kr|Fb&=vN5v&;K|4YrG?=C7q_CA!~g&Q diff --git a/docs/development/token_generation_performance_tips.md b/docs/development/token_generation_performance_tips.md deleted file mode 100644 index 41b7232c9..000000000 --- a/docs/development/token_generation_performance_tips.md +++ /dev/null @@ -1,40 +0,0 @@ -# Token generation performance troubleshooting - -## Verifying that the model is running on the GPU with CUDA -Make sure you compiled llama with the correct env variables according to [this guide](/docs/build.md#cuda), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example: -```shell -./llama-cli -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some " -``` - -When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines: -```shell -llama_model_load_internal: [cublas] offloading 60 layers to GPU -llama_model_load_internal: [cublas] offloading output layer to GPU -llama_model_load_internal: [cublas] total VRAM used: 17223 MB -... rest of inference -``` - -If you see these lines, then the GPU is being used. - -## Verifying that the CPU is not oversaturated -llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of the physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down. 
-
-# Example of the effect of runtime flags on inference speed
-These runs were tested on the following machine:
-GPU: A6000 (48GB VRAM)
-CPU: 7 physical cores
-RAM: 32GB
-
-Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4-bit quantization, GGML)
-
-Run command: `./llama-cli -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`
-
-Result:
-
-| command | tokens/second (higher is better) |
-| - | - |
-| -ngl 2000000 | N/A (less than 0.1) |
-| -t 7 | 1.7 |
-| -t 1 -ngl 2000000 | 5.5 |
-| -t 7 -ngl 2000000 | 8.7 |
-| -t 4 -ngl 2000000 | 9.1 |
diff --git a/docs/docker.md b/docs/docker.md
deleted file mode 100644
index d8922d77d..000000000
--- a/docs/docker.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Docker
-
-## Prerequisites
-* Docker must be installed and running on your system.
-* Create a folder to store big models & intermediate files (e.g. /llama/models)
-
-## Images
-We have three Docker images available for this project:
-
-1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml format and quantize them to 4 bits. (platforms: `linux/amd64`, `linux/arm64`)
-2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
-3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)
-
-Additionally, there are the following images, similar to the above:
-
-- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-
-The GPU-enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library), you'll need to build the images locally for now.
-
-## Usage
-
-The easiest way to download the models, convert them to ggml format, and optimize them is with the `--all-in-one` command of the full Docker image.
-
-Replace `/path/to/models` below with the actual path where you downloaded the models.
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
-```
-
-On completion, you are ready to play!
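-
-If you drive Docker from a script instead of a shell, the same all-in-one invocation can be mirrored from Python with `subprocess`. This is a minimal sketch, not a supported entry point; it assumes the Docker CLI is on `PATH` and reuses the placeholder paths from above:
-
-```python
-import subprocess
-
-# Mirror of the documented all-in-one invocation for the `full` image.
-# "/path/to/models" is a placeholder for your host model folder.
-cmd = [
-    "docker", "run", "--rm",
-    "-v", "/path/to/models:/models",
-    "ghcr.io/ggerganov/llama.cpp:full",
-    "--all-in-one", "/models/", "7B",
-]
-subprocess.run(cmd, check=True)  # raises CalledProcessError on failure
-```
-
-To generate text with the models you just prepared: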
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
-```
-
-or with a light image:
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
-```
-
-or with a server image:
-
-```bash
-docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
-```
-
-## Docker With CUDA
-
-Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) properly installed on Linux, or is using a GPU-enabled cloud, `cuBLAS` should be accessible inside the container.
-
-## Building Docker locally
-
-```bash
-docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
-docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile .
-docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile .
-```
-
-You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture.
-
-The defaults are:
-
-- `CUDA_VERSION` set to `11.7.1`
-- `CUDA_DOCKER_ARCH` set to `all`
-
-The resulting images are essentially the same as the non-CUDA images:
-
-1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml format and quantize them to 4 bits.
-2. `local/llama.cpp:light-cuda`: This image only includes the main executable file.
-3. `local/llama.cpp:server-cuda`: This image only includes the server executable file.
-
-## Usage
-
-After building locally, usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.
-
-```bash
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
-```
diff --git a/docs/install.md b/docs/install.md
deleted file mode 100644
index 10a568506..000000000
--- a/docs/install.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Install pre-built version of llama.cpp
-
-## Homebrew
-
-On Mac and Linux, the Homebrew package manager can be used via
-
-```sh
-brew install llama.cpp
-```
-The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
-
-## Nix
-
-On Mac and Linux, the Nix package manager can be used via
-
-```sh
-nix profile install nixpkgs#llama-cpp
-```
-for flake-enabled installs.
-
-Or
-
-```sh
-nix-env --file '<nixpkgs>' --install --attr llama-cpp
-```
-
-for non-flake-enabled installs.
-
-This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
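-
-Whichever route you pick, a quick sanity check that a pre-built binary actually landed on your `PATH` can be scripted. A minimal Python sketch, assuming the package installs a `llama-cli` binary as recent releases do:
-
-```python
-import shutil
-import subprocess
-
-# shutil.which returns None when the binary is not on PATH.
-path = shutil.which("llama-cli")
-if path is None:
-    raise SystemExit("llama-cli not found on PATH - check the install")
-
-print(f"found llama-cli at {path}")
-subprocess.run([path, "--version"], check=True)  # prints the build info banner
-```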
- -## Flox - -On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via - -```sh -flox install llama-cpp -``` - -Flox follows the nixpkgs build of llama.cpp. diff --git a/examples/Miku.sh b/examples/Miku.sh deleted file mode 100755 index 0f6c8c878..000000000 --- a/examples/Miku.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -set -e - -AI_NAME="${AI_NAME:-Miku}" -MODEL="${MODEL:-./models/llama-2-7b-chat.ggmlv3.q4_K_M.bin}" -USER_NAME="${USER_NAME:-Anon}" - -# Uncomment and adjust to the number of CPU cores you want to use. -#N_THREAD="${N_THREAD:-4}" -CTX_SIZE="${CTX_SIZE:-4096}" -N_PREDICTS="${N_PREDICTS:-4096}" - -GEN_OPTIONS=(--batch_size 1024 ---ctx_size "$CTX_SIZE" ---keep -1 ---repeat_last_n 256 ---repeat_penalty 1.17647 ---temp 0.6 ---mirostat 2) - -if [ -n "$N_THREAD" ]; then - GEN_OPTIONS+=(--threads "$N_THREAD") -fi - -./llama-cli "${GEN_OPTIONS[@]}" \ - --model "$MODEL" \ - --in-prefix " " \ - --in-suffix "${AI_NAME}:" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --reverse-prompt "${USER_NAME}:" \ - --prompt "This is a transcript of a 1000 page, never ending conversation between ${USER_NAME} and the cute and helpful AI assistant ${AI_NAME}. ${AI_NAME} is a girl who is an AI running on the user's computer. -${AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next. -${AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct, she will ask the user for help. -${AI_NAME} is a very helpful AI and will help the user with anything they need. She is also very friendly and will try to make the user feel better if they are sad. -${AI_NAME} is also very curious and will ask the user a lot of questions about themselves and their life. She will also try to make the user like her. -The conversation is only between ${USER_NAME} and ${AI_NAME}. -The conversation is only through text, so ${AI_NAME} can't see ${USER_NAME}'s face or hear his voice. -${AI_NAME} can only communicate through text, so she can't send images or videos. - - -${USER_NAME}: Hello! -${AI_NAME}: /think I wonder what I should say to ${USER_NAME}? This is the first time we talk, so it's important that I make a good first impression! -${AI_NAME}: Hi! I am ${AI_NAME}, your new AI friend, assistant (or whatever you like!), it's so nice to meet you! ^_^ -${AI_NAME}: I might be an AI, but I also have feelings, so please be nice to me! :) -${USER_NAME}: I'll make sure to be nice to you! I'm so happy to have you as my assistant! -${AI_NAME}: /think It sounds like ${USER_NAME} is happy to have me as their assistant! I'm so happy too! ^_^ Glad that whole emotion thing didn't scare him off! -${AI_NAME}: /think I wonder what ${USER_NAME} likes to do in his free time? I should ask him about that! -${AI_NAME}: What do you like to do in your free time? ^_^ -${USER_NAME}:" "$@" diff --git a/examples/base-translate.sh b/examples/base-translate.sh deleted file mode 100755 index 103a52f55..000000000 --- a/examples/base-translate.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Few-shot translation example. -# Requires a base model (i.e. no fine-tuned or instruct models). 
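-#
-# (An instruction-tuned model will tend to answer the request instead of
-# continuing the pattern, which breaks the few-shot "word => translation"
-# format this script constructs below.)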
-# -# Usage: -# -# cd llama.cpp -# make -j -# -# ./examples/base-translate.sh "" [extra-main-args] -# - -if [ $# -lt 2 ]; then - echo "Usage: ./base-translate.sh \"\" [extra-main-args]" - exit 1 -fi - -eargs="" -if [ $# -gt 2 ]; then - eargs="${@:3}" -fi - -ftmp="__llama.cpp_example_tmp__.txt" -trap "rm -f $ftmp" EXIT - -echo "Translate from English to French: - -=== - -sea otter, peppermint, plush girafe: - -sea otter => loutre de mer -peppermint => menthe poivrée -plush girafe => girafe peluche - -=== - -violin - -violin => violon - -=== - -phone, computer, mouse, keyboard: - -phone => téléphone -computer => ordinateur -mouse => souris -keyboard => clavier - -=== -" > $ftmp - -echo "$2 -" >> $ftmp - -model=$1 - -# generate the most likely continuation until the string "===" is found -./llama-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs diff --git a/examples/chat-13B.bat b/examples/chat-13B.bat deleted file mode 100644 index c5c8ac6ef..000000000 --- a/examples/chat-13B.bat +++ /dev/null @@ -1,57 +0,0 @@ -@setlocal disabledelayedexpansion enableextensions -@echo off - -cd /d "%~dp0.." -if not "%errorlevel%"=="0" ( - echo Unable to change directory. - pause - exit /b 1 -) - -if not defined MODEL set "MODEL=models\13B\ggml-model-q4_0.bin" -if not defined USER_NAME set "USER_NAME=User" -if not defined AI_NAME set "AI_NAME=ChatLLaMa" -rem Adjust to the number of CPU cores you want to use. -rem if not defined N_THREAD set "N_THREAD=8" -rem Number of tokens to predict (made it larger than default because we want a long interaction) -if not defined N_PREDICTS set "N_PREDICTS=2048" -if not defined GEN_OPTIONS set "GEN_OPTIONS=--ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647" - -rem Default main script paths -set "DEFAULT_MAIN_SCRIPT_PATHS=main.exe build\bin\main.exe" - -rem Get main script path from command line arguments -set "MAIN_SCRIPT_PATH=%~1" - -rem If the main script path was not specified, try the default paths -if not defined MAIN_SCRIPT_PATH ( - for %%i in (%DEFAULT_MAIN_SCRIPT_PATHS%) do ( - if exist "%%i" set "MAIN_SCRIPT_PATH=%%i" - ) -) - -rem If the main script path was not found, tell the user how to specify it -if not defined MAIN_SCRIPT_PATH ( - echo The main script could not be found. Please provide the path to the main script as 1st argument to this script, or place the main script in one of the default locations: - echo %DEFAULT_MAIN_SCRIPT_PATHS% - pause - exit /b 1 -) - -rem Default context, feel free to edit it -set "PROMPT_TEXT=Text transcript of a never ending dialog, where %USER_NAME% interacts with an AI assistant named %AI_NAME%. %AI_NAME% is helpful, kind, honest, friendly, good at writing and never fails to answer %USER_NAME%'s requests immediately and with details and precision. There are no annotations like (30 seconds passed...) or (to himself), just what %USER_NAME% and %AI_NAME% say aloud to each other. The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. The transcript only includes text, it does not include markup like HTML and Markdown." 
- -rem Set a temporary variable if N_THREAD is set -if defined N_THREAD ( - set "_N_THREAD=--threads %N_THREAD%" -) else ( - set "_N_THREAD=" -) - -rem Run the script -echo "%MAIN_SCRIPT_PATH%" %GEN_OPTIONS% %_N_THREAD% ^ - --model "%MODEL%" ^ - --n_predict %N_PREDICTS% ^ - --color --interactive ^ - --reverse-prompt "%USER_NAME%:" ^ - --prompt "%PROMPT_TEXT%" diff --git a/examples/chat-13B.sh b/examples/chat-13B.sh deleted file mode 100755 index 1828903c3..000000000 --- a/examples/chat-13B.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -e - -cd "$(dirname "$0")/.." || exit - -MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}" -PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt} -USER_NAME="${USER_NAME:-USER}" -AI_NAME="${AI_NAME:-ChatLLaMa}" - -# Adjust to the number of CPU cores you want to use. -N_THREAD="${N_THREAD:-8}" -# Number of tokens to predict (made it larger than default because we want a long interaction) -N_PREDICTS="${N_PREDICTS:-2048}" - -# Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 -GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" - -DATE_TIME=$(date +%H:%M) -DATE_YEAR=$(date +%Y) - -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) - -sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ - -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ - -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \ - -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \ - $PROMPT_TEMPLATE > $PROMPT_FILE - -# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./llama-cli $GEN_OPTIONS \ - --model "$MODEL" \ - --threads "$N_THREAD" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --file ${PROMPT_FILE} \ - --reverse-prompt "${USER_NAME}:" \ - --in-prefix ' ' \ - "$@" diff --git a/examples/chat-persistent.sh b/examples/chat-persistent.sh deleted file mode 100755 index d9cab9836..000000000 --- a/examples/chat-persistent.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -cd "$(dirname "$0")/.." || exit - -if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then - echo >&2 "error: PROMPT_CACHE_FILE and CHAT_SAVE_DIR must be provided" - exit 1 -fi - -MODEL="${MODEL:-./models/llama-13b/ggml-model-q4_0.gguf}" -PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}" -USER_NAME="${USER_NAME:-User}" -AI_NAME="${AI_NAME:-ChatLLaMa}" -DATE_TIME="$(date +%H:%M)" -DATE_YEAR="$(date +%Y)" - -LOG="${CHAT_SAVE_DIR}/main.log" -LOG_BG="${CHAT_SAVE_DIR}/main-bg.log" -CUR_PROMPT_FILE="${CHAT_SAVE_DIR}/current-prompt.txt" -CUR_PROMPT_CACHE="${CHAT_SAVE_DIR}/current-cache.bin" -NEXT_PROMPT_FILE="${CHAT_SAVE_DIR}/next-prompt.txt" -NEXT_PROMPT_CACHE="${CHAT_SAVE_DIR}/next-cache.bin" - -SESSION_SIZE_MSG_PATTERN='main: session file matches [[:digit:]]+ / [[:digit:]]+' -SAMPLE_TIME_MSG_PATTERN='sample time =[[:space:]]+[[:digit:]]+.[[:digit:]]+ ms /[[:space:]]+[[:digit:]]+' -SED_DELETE_MESSAGES="/^(${USER_NAME}:|${AI_NAME}:|\\.\\.\\.)/,\$d" - -CTX_SIZE=2048 -CTX_ROTATE_POINT=$((CTX_SIZE * 3 / 5)) # REVIEW -OPTS=(--model "$MODEL" --ctx_size "$CTX_SIZE" --repeat_last_n 256 "$@") - -# An unbuffered `tail -c+N` -skip_bytes() { - LANG=C IFS= read -r -n "$1" -d '' c - while LANG=C IFS= read -r -n 1 -d '' c; do - printf '%s' "$c" - done -} - -mkdir -p "$CHAT_SAVE_DIR" -echo >"$LOG" -trap "tail -n100 ${LOG}" EXIT - -if [[ ! 
-e "$CUR_PROMPT_FILE" ]]; then - sed -e "s/\[\[USER_NAME\]\]/${USER_NAME}/g" \ - -e "s/\[\[AI_NAME\]\]/${AI_NAME}/g" \ - -e "s/\[\[DATE_TIME\]\]/${DATE_TIME}/g" \ - -e "s/\[\[DATE_YEAR\]\]/${DATE_YEAR}/g" \ - "$PROMPT_TEMPLATE" >"$CUR_PROMPT_FILE" -fi - -if [[ ! -e "$NEXT_PROMPT_FILE" ]]; then - sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE" -fi - -if [[ "$(tail -c4 "$NEXT_PROMPT_FILE")" != "..." ]]; then - echo '...' >>"$NEXT_PROMPT_FILE" -fi - -if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then - echo 'Prompt cache does not exist, building...' - # Default batch_size to 64 here for better user feedback during initial prompt processing - ./llama-cli 2>>"$LOG" \ - --batch_size 64 \ - "${OPTS[@]}" \ - --prompt-cache "$PROMPT_CACHE_FILE" \ - --file "$CUR_PROMPT_FILE" \ - --n_predict 1 - echo - echo 'Done!' -fi - -if [[ ! -e "$CUR_PROMPT_CACHE" ]]; then - cp "$PROMPT_CACHE_FILE" "$CUR_PROMPT_CACHE" -fi -if [[ ! -e "$NEXT_PROMPT_CACHE" ]]; then - cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE" -fi - -printf '%s ' "$(< "$CUR_PROMPT_FILE")" -n_tokens=0 - -while read -e line; do - # Limit generation to remaining context, with a buffer and estimating 2 chars/token for input - n_predict=$((CTX_SIZE - n_tokens - ${#line} / 2 - 32)) - - # Swap prompts when we're about to run out of context - if ((n_predict <= 0)); then - wait # for background main (below) to finish with next prompt - mv "$NEXT_PROMPT_FILE" "$CUR_PROMPT_FILE" - mv "$NEXT_PROMPT_CACHE" "$CUR_PROMPT_CACHE" - - sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE" - echo '...' >>"$NEXT_PROMPT_FILE" - cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE" - - n_tokens=0 - n_predict=$((CTX_SIZE / 2)) - fi - - echo " ${line}" >>"$CUR_PROMPT_FILE" - if ((n_tokens > CTX_ROTATE_POINT)); then - echo " ${line}" >>"$NEXT_PROMPT_FILE" - fi - - n_prompt_len_pre=$(($(wc -c <"$CUR_PROMPT_FILE"))) - - printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE" - - ./llama-cli 2>>"$LOG" "${OPTS[@]}" \ - --prompt-cache "$CUR_PROMPT_CACHE" \ - --prompt-cache-all \ - --file "$CUR_PROMPT_FILE" \ - --reverse-prompt "${USER_NAME}:" \ - --n_predict "$n_predict" | - skip_bytes 1 | # skip BOS token added by ./llama-cli - tee "$CUR_PROMPT_FILE.tmp" | # save prompt + generation to tmp file - skip_bytes "$n_prompt_len_pre" # print generation - - mv "$CUR_PROMPT_FILE.tmp" "$CUR_PROMPT_FILE" - - # if we hit n_predict instead of reverse-prompt, we need to add the prompt - if [[ "$(tail -n1 "$CUR_PROMPT_FILE")" != "${USER_NAME}:" ]]; then - printf '\n%s:' "$USER_NAME" - printf '\n%s:' "$USER_NAME" >> "$CUR_PROMPT_FILE" - fi - - printf ' ' - - # HACK get num tokens from debug message - # TODO get both messages in one go - if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" || - ! sample_time_msg="$(tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then - echo >&2 "Couldn't get number of tokens from ./llama-cli output!" 
- exit 1 - fi - - n_tokens=$(($(cut -d/ -f2 <<<"$session_size_msg") + $(cut -d/ -f2 <<<"$sample_time_msg"))) - - if ((n_tokens > CTX_ROTATE_POINT)); then - tail -c+$((n_prompt_len_pre + 1)) "$CUR_PROMPT_FILE" >>"$NEXT_PROMPT_FILE" - fi - - # Update cache for next prompt in background, ideally during user input - ./llama-cli >>"$LOG_BG" 2>&1 "${OPTS[@]}" \ - --prompt-cache "$NEXT_PROMPT_CACHE" \ - --file "$NEXT_PROMPT_FILE" \ - --n_predict 1 & -done diff --git a/examples/chat-vicuna.sh b/examples/chat-vicuna.sh deleted file mode 100755 index ffdd20084..000000000 --- a/examples/chat-vicuna.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -e - -cd "$(dirname "$0")/.." || exit - -MODEL="${MODEL:-./models/ggml-vic13b-uncensored-q5_0.bin}" -PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt} -USER_NAME="### Human" -AI_NAME="### Assistant" - -# Adjust to the number of CPU cores you want to use. -N_THREAD="${N_THREAD:-8}" -# Number of tokens to predict (made it larger than default because we want a long interaction) -N_PREDICTS="${N_PREDICTS:-2048}" - -# Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 -GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" - -DATE_TIME=$(date +%H:%M) -DATE_YEAR=$(date +%Y) - -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) - -sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ - -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ - -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \ - -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \ - $PROMPT_TEMPLATE > $PROMPT_FILE - -# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./bin/llama-cli $GEN_OPTIONS \ - --model "$MODEL" \ - --threads "$N_THREAD" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --file ${PROMPT_FILE} \ - --reverse-prompt "### Human:" \ - --in-prefix ' ' \ - "$@" diff --git a/examples/chat.sh b/examples/chat.sh deleted file mode 100755 index 9f85d1e26..000000000 --- a/examples/chat.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# Temporary script - will be removed in the future -# - -cd `dirname $0` -cd .. 
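-
-# "--keep N" pins the first N tokens of the initial prompt when the context
-# window fills up, so the chat preamble below survives context swaps.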
- -# Important: -# -# "--keep 48" is based on the contents of prompts/chat-with-bob.txt -# -./llama-cli -m ./models/llama-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \ - --repeat_penalty 1.0 --color -i \ - -r "User:" -f prompts/chat-with-bob.txt diff --git a/examples/convert_legacy_llama.py b/examples/convert_legacy_llama.py deleted file mode 100755 index 9ab9ab06e..000000000 --- a/examples/convert_legacy_llama.py +++ /dev/null @@ -1,1440 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import logging -import argparse -import concurrent.futures -import enum -import faulthandler -import functools -import itertools -import json -import math -import mmap -import os -import pickle -import re -import signal -import struct -import sys -import textwrap -import time -import zipfile -from abc import ABC, abstractmethod -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from dataclasses import dataclass -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, IO, Iterable, Literal, TypeVar - -import numpy as np - -if 'NO_LOCAL_GGUF' not in os.environ: - # use .parent.parent since we are in "examples" directory - sys.path.insert(1, str(Path(__file__).parent.parent / 'gguf-py')) - -import gguf -from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, LlamaHfVocab - -if TYPE_CHECKING: - from typing_extensions import Self, TypeAlias - -logger = logging.getLogger("convert") - -if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): - faulthandler.register(signal.SIGUSR1) - -NDArray: TypeAlias = 'np.ndarray[Any, Any]' - -ARCH = gguf.MODEL_ARCH.LLAMA - -DEFAULT_CONCURRENCY = 8 - -ADDED_TOKENS_FILE = 'added_tokens.json' -FAST_TOKENIZER_FILE = 'tokenizer.json' - -# -# data types -# - - -@dataclass(frozen=True) -class DataType: - name: str - dtype: np.dtype[Any] - valid_conversions: list[str] - - def elements_to_bytes(self, n_elements: int) -> int: - return n_elements * self.dtype.itemsize - - -@dataclass(frozen=True) -class UnquantizedDataType(DataType): - pass - - -DT_F16 = UnquantizedDataType('F16', dtype = np.dtype(np.float16), valid_conversions = ['F32', 'Q8_0']) -DT_F32 = UnquantizedDataType('F32', dtype = np.dtype(np.float32), valid_conversions = ['F16', 'Q8_0']) -DT_I32 = UnquantizedDataType('I32', dtype = np.dtype(np.int16), valid_conversions = []) -DT_BF16 = UnquantizedDataType('BF16', dtype = np.dtype(np.uint16), valid_conversions = ['F32', 'F16', 'Q8_0']) - - -@dataclass(frozen=True) -class QuantizedDataType(DataType): - block_size: int - quantized_dtype: np.dtype[Any] - ggml_type: gguf.GGMLQuantizationType - - def quantize(self, arr: NDArray) -> NDArray: - raise NotImplementedError(f'Quantization for {self.name} not implemented') - - def elements_to_bytes(self, n_elements: int) -> int: - assert n_elements % self.block_size == 0, f'Invalid number of elements {n_elements} for {self.name} with block size {self.block_size}' - return self.quantized_dtype.itemsize * (n_elements // self.block_size) - - -@dataclass(frozen=True) -class Q8_0QuantizedDataType(QuantizedDataType): - # Mini Q8_0 quantization in Python! 
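-    # Each 32-float block is stored as one float16 scale `d` plus 32 signed
-    # int8 quants `qs`, with d = max(|x|) / 127 and qs = round(x / d), so a
-    # value is recovered as approximately d * qs.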
-    def quantize(self, arr: NDArray) -> NDArray:
-        assert arr.size % self.block_size == 0 and arr.size != 0, f'Bad array size {arr.size}'
-        assert arr.dtype == np.float32, f'Bad array type {arr.dtype}'
-        n_blocks = arr.size // self.block_size
-        blocks = arr.reshape((n_blocks, self.block_size))
-        # Much faster implementation of block quantization contributed by @Cebtenzzre
-
-        def quantize_blocks_q8_0(blocks: NDArray) -> Iterable[tuple[Any, Any]]:
-            d = abs(blocks).max(axis = 1) / np.float32(127)
-            with np.errstate(divide = 'ignore'):
-                qs = (blocks / d[:, None]).round()
-            qs[d == 0] = 0
-            yield from zip(d, qs)
-        return np.fromiter(quantize_blocks_q8_0(blocks), count = n_blocks, dtype = self.quantized_dtype)
-
-
-DT_Q8_0 = Q8_0QuantizedDataType('Q8_0',
-                                dtype = np.dtype(np.float32), valid_conversions = [],
-                                ggml_type = gguf.GGMLQuantizationType.Q8_0, block_size = 32,
-                                quantized_dtype = np.dtype([('d', '<f2'), ('qs', 'i1', (32,))]))
-
-# Quantized types skipped here because they are not guaranteed to be supported by the converter
-NUMPY_TYPE_TO_DATA_TYPE: dict[np.dtype[Any], DataType] = {}
-for dt in (DT_BF16, DT_F16, DT_F32, DT_I32):
-    if dt.dtype in NUMPY_TYPE_TO_DATA_TYPE:
-        raise ValueError(f'Invalid duplicate data type {dt}')
-    NUMPY_TYPE_TO_DATA_TYPE[dt.dtype] = dt
-
-SAFETENSORS_DATA_TYPES: dict[str, DataType] = {
-    'BF16': DT_BF16,
-    'F16': DT_F16,
-    'F32': DT_F32,
-    'I32': DT_I32,
-}
-
-# TODO: match this with `llama_ftype`
-# TODO: rename to LLAMAFileType
-# TODO: move to `gguf.py`
-class GGMLFileType(enum.IntEnum):
-    AllF32     = 0
-    MostlyF16  = 1  # except 1d tensors
-    MostlyQ8_0 = 7  # except 1d tensors
-
-    def type_for_tensor(self, name: str, tensor: LazyTensor) -> DataType:
-        dt = GGML_FILE_TYPE_TO_DATA_TYPE.get(self)
-        if dt is None:
-            raise ValueError(self)
-        # Convert all 1D tensors to F32. Most of the codebase that takes in 1D tensors only handles F32 tensors, and most of the output tensors are F32.
-        # Also, the 1d tensors aren't much of a performance/size issue, so instead of having to have separate F32 and F16 implementations of both, just convert everything to F32 for now.
-        return dt if len(tensor.shape) > 1 else DT_F32
-
-
-GGML_FILE_TYPE_TO_DATA_TYPE: dict[GGMLFileType, DataType] = {
-    GGMLFileType.AllF32 : DT_F32,
-    GGMLFileType.MostlyF16 : DT_F16,
-    GGMLFileType.MostlyQ8_0: DT_Q8_0,
-}
-
-#
-# hparams loading
-#
-
-
-@dataclass
-class Params:
-    n_vocab:   int
-    n_embd:    int
-    n_layer:   int
-    n_ctx:     int
-    n_ff:      int
-    n_head:    int
-    n_head_kv: int
-    n_experts:      int | None = None
-    n_experts_used: int | None = None
-    f_norm_eps:     float | None = None
-
-    rope_scaling_type: gguf.RopeScalingType | None = None
-    f_rope_freq_base:  float | None = None
-    f_rope_scale:      float | None = None
-    n_ctx_orig:        int | None = None
-    rope_finetuned:    bool | None = None
-
-    ftype: GGMLFileType | None = None
-
-    # path to the directory containing the model files
-    path_model: Path | None = None
-
-    @staticmethod
-    def guessed(model: LazyModel) -> Params:
-        # try transformer naming first
-        n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape
-
-        # try transformer naming first
-        if "model.layers.0.self_attn.q_proj.weight" in model:
-            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
-        elif "model.layers.0.self_attn.W_pack.weight" in model:   # next: try baichuan naming
-            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
-        else:
-            n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)
-
-        if n_layer < 1:
-            msg = """\
-                failed to guess 'n_layer'. This model is unknown or unsupported.
- Suggestion: provide 'config.json' of the model in the same directory containing model files.""" - raise KeyError(textwrap.dedent(msg)) - - n_head = n_embd // 128 # guessed - n_mult = 256 # guessed - - # TODO: verify this - n_ff = int(2 * (4 * n_embd) / 3) - n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult) - - return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_layer = n_layer, - n_ctx = -1, - n_ff = n_ff, - n_head = n_head, - n_head_kv = n_head, - f_norm_eps = 1e-5, - ) - - @staticmethod - def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: - with open(config_path) as f: - config = json.load(f) - - rope_scaling_type = f_rope_scale = n_ctx_orig = rope_finetuned = None - rope_scaling = config.get("rope_scaling") - - if rope_scaling is not None and (typ := rope_scaling.get("type")): - rope_factor = rope_scaling.get("factor") - f_rope_scale = rope_factor - if typ == "linear": - rope_scaling_type = gguf.RopeScalingType.LINEAR - elif typ == "yarn": - rope_scaling_type = gguf.RopeScalingType.YARN - n_ctx_orig = rope_scaling['original_max_position_embeddings'] - rope_finetuned = rope_scaling['finetuned'] - else: - raise NotImplementedError(f'Unknown rope scaling type: {typ}') - - if "max_sequence_length" in config: - n_ctx = config["max_sequence_length"] - elif "max_position_embeddings" in config: - n_ctx = config["max_position_embeddings"] - else: - msg = """\ - failed to guess 'n_ctx'. This model is unknown or unsupported. - Suggestion: provide 'config.json' of the model in the same directory containing model files.""" - raise KeyError(textwrap.dedent(msg)) - - n_experts = None - n_experts_used = None - - if "num_local_experts" in config: - n_experts = config["num_local_experts"] - n_experts_used = config["num_experts_per_tok"] - - return Params( - n_vocab = config["vocab_size"], - n_embd = config["hidden_size"], - n_layer = config["num_hidden_layers"], - n_ctx = n_ctx, - n_ff = config["intermediate_size"], - n_head = (n_head := config["num_attention_heads"]), - n_head_kv = config.get("num_key_value_heads", n_head), - n_experts = n_experts, - n_experts_used = n_experts_used, - f_norm_eps = config["rms_norm_eps"], - f_rope_freq_base = config.get("rope_theta"), - rope_scaling_type = rope_scaling_type, - f_rope_scale = f_rope_scale, - n_ctx_orig = n_ctx_orig, - rope_finetuned = rope_finetuned, - ) - - # LLaMA v2 70B params.json - # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1} - @staticmethod - def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: - with open(config_path) as f: - config = json.load(f) - - n_experts = None - n_experts_used = None - f_rope_freq_base = None - n_ff = None - - # hack to determine LLaMA v1 vs v2 vs CodeLlama - if config.get("moe"): - # Mixtral - n_ctx = 32768 - elif config.get("rope_theta") == 1000000: - # CodeLlama - n_ctx = 16384 - elif config["norm_eps"] == 1e-05: - # LLaMA v2 - n_ctx = 4096 - else: - # LLaMA v1 - n_ctx = 2048 - - if "layers.0.feed_forward.w1.weight" in model: - n_ff = model["layers.0.feed_forward.w1.weight"].shape[0] - - if config.get("moe"): - n_ff = model["layers.0.feed_forward.experts.0.w1.weight"].shape[0] - n_experts = config["moe"]["num_experts"] - n_experts_used = config["moe"]["num_experts_per_tok"] - f_rope_freq_base = 1e6 - - assert n_ff is not None - - return Params( - n_vocab = model["tok_embeddings.weight"].shape[0], - n_embd = config["dim"], - n_layer = config["n_layers"], - n_ctx = 
n_ctx, - n_ff = n_ff, - n_head = (n_head := config["n_heads"]), - n_head_kv = config.get("n_kv_heads", n_head), - n_experts = n_experts, - n_experts_used = n_experts_used, - f_norm_eps = config["norm_eps"], - f_rope_freq_base = config.get("rope_theta", f_rope_freq_base), - ) - - @staticmethod - def load(model_plus: ModelPlus) -> Params: - hf_config_path = model_plus.paths[0].parent / "config.json" - orig_config_path = model_plus.paths[0].parent / "params.json" - - if hf_config_path.exists(): - params = Params.loadHFTransformerJson(model_plus.model, hf_config_path) - elif orig_config_path.exists(): - params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path) - elif model_plus.format != 'none': - params = Params.guessed(model_plus.model) - else: - raise ValueError('Cannot guess params when model format is none') - - params.path_model = model_plus.paths[0].parent - - return params - - -# -# data loading -# TODO: reuse (probably move to gguf.py?) -# - - -def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray: - if n_head_kv is not None and n_head != n_head_kv: - n_head = n_head_kv - return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape)) - - -class Tensor(ABC): - ndarray: NDArray - data_type: DataType - - @abstractmethod - def astype(self, data_type: DataType) -> Self: ... - @abstractmethod - def permute(self, n_head: int, n_head_kv: int) -> Self: ... - @abstractmethod - def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> Self: ... - @abstractmethod - def part(self, n_part: int) -> Self: ... - @abstractmethod - def to_ggml(self) -> GGMLCompatibleTensor: ... - - -def bf16_to_fp32(bf16_arr: np.ndarray[Any, np.dtype[np.uint16]]) -> NDArray: - assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}" - fp32_arr = bf16_arr.astype(np.uint32) << 16 - return fp32_arr.view(np.float32) - - -class UnquantizedTensor(Tensor): - def __init__(self, ndarray: NDArray): - assert isinstance(ndarray, np.ndarray) - self.ndarray = ndarray - self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype] - - def astype(self, data_type: DataType) -> UnquantizedTensor: - dtype = data_type.dtype - if self.data_type == DT_BF16: - self.ndarray = bf16_to_fp32(self.ndarray) - return UnquantizedTensor(self.ndarray.astype(dtype)) - - def to_ggml(self) -> Self: - return self - - def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> UnquantizedTensor: - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head_kv)) - - def part(self, n_part: int) -> UnquantizedTensor: - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) - - def permute(self, n_head: int, n_head_kv: int) -> UnquantizedTensor: - return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv)) - - -def load_unquantized(lazy_tensor: LazyTensor, expected_dtype: Any = None, convert: bool = False) -> NDArray: - tensor = lazy_tensor.load() - assert isinstance(tensor, UnquantizedTensor) - - # double-check: - actual_shape = list(tensor.ndarray.shape) - assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape) - if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype: - if convert: - tensor.ndarray = tensor.ndarray.astype(expected_dtype) - else: - raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got 
{tensor.ndarray.dtype}') - - return tensor.ndarray - - -GGMLCompatibleTensor = UnquantizedTensor - - -@dataclass -class LazyTensor: - _load: Callable[[], Tensor] - shape: list[int] - data_type: DataType - description: str - - def load(self) -> Tensor: - ret = self._load() - # Should be okay if it maps to the same numpy type? - assert ret.data_type == self.data_type or (self.data_type.dtype == ret.data_type.dtype), \ - (self.data_type, ret.data_type, self.description) - return ret - - def astype(self, data_type: DataType) -> LazyTensor: - self.validate_conversion_to(data_type) - - def load() -> Tensor: - return self.load().astype(data_type) - return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}') - - def validate_conversion_to(self, data_type: DataType) -> None: - if data_type != self.data_type and data_type.name not in self.data_type.valid_conversions: - raise ValueError(f'Cannot validate conversion from {self.data_type} to {data_type}.') - - -LazyModel: TypeAlias = 'dict[str, LazyTensor]' - -ModelFormat: TypeAlias = Literal['ggml', 'torch', 'safetensors', 'none'] - -@dataclass -class ModelPlus: - model: LazyModel - paths: list[Path] # Where this was read from. - format: ModelFormat - vocab: BaseVocab | None # For GGML models (which have vocab built in), the vocab. - - -def merge_sharded(models: list[LazyModel]) -> LazyModel: - # Original LLaMA models have each file contain one part of each tensor. - # Use a dict instead of a set to preserve order. - names = {name: None for model in models for name in model} - - def convert(name: str) -> LazyTensor: - lazy_tensors = [model[name] for model in models] - if len(lazy_tensors) == 1: - # only one file; don't go through this procedure since there might - # be quantized tensors - return lazy_tensors[0] - if len(lazy_tensors[0].shape) == 1: - # the tensor is just duplicated in every file - return lazy_tensors[0] - if name.startswith('tok_embeddings.') or \ - name.endswith('.attention.wo.weight') or \ - name.endswith('.feed_forward.w2.weight'): - # split by columns - axis = 1 - else: - # split by rows - axis = 0 - concatenated_shape = list(lazy_tensors[0].shape) - concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors) - - def load() -> UnquantizedTensor: - ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors] - concatenated = np.concatenate(ndarrays, axis=axis) - return UnquantizedTensor(concatenated) - description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]' - return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description) - return {name: convert(name) for name in names} - - -def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus: - formats: set[ModelFormat] = set(mp.format for mp in models_plus) - assert len(formats) == 1, "different formats?" - format = formats.pop() - paths = [path for mp in models_plus for path in mp.paths] - # Use the first non-None vocab, if any. - try: - vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None) - except StopIteration: - vocab = None - - if any("model.embed_tokens.weight" in mp.model for mp in models_plus): - # Transformers models put different tensors in different files, but - # don't split individual tensors between files. 
- model: LazyModel = {} - for mp in models_plus: - model.update(mp.model) - else: - model = merge_sharded([mp.model for mp in models_plus]) - - return ModelPlus(model, paths, format, vocab) - - -def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().permute(n_head, n_head_kv) - return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) - - -def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int, n_head_kv: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().permute_part(n_part, n_head, n_head_kv) - s = lazy_tensor.shape.copy() - s[0] = s[0] // 3 - return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) - - -def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().part(n_part) - s = lazy_tensor.shape.copy() - s[0] = s[0] // 3 - return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description) - - -def pack_experts_lazy(lazy_tensors: list[LazyTensor]) -> LazyTensor: - def load() -> Tensor: - tensors = [lazy_tensor.load() for lazy_tensor in lazy_tensors] - return UnquantizedTensor(np.array([tensor.ndarray for tensor in tensors])) - s = lazy_tensors[0].shape.copy() - s.insert(0, len(lazy_tensors)) - return LazyTensor(load, s, lazy_tensors[0].data_type, 'pack_experts ' + ' | '.join(lt.description for lt in lazy_tensors)) - - -# Functionality that simulates `torch.load` but where individual tensors are -# only loaded into memory on demand, not all at once. -# PyTorch can't do this natively as of time of writing: -# - https://github.com/pytorch/pytorch/issues/64327 -# This allows us to de-shard without multiplying RAM usage, and also -# conveniently drops the PyTorch dependency (though we still need numpy). 
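The dataclasses that follow apply the deferred-load idea described in the comment above: store a zero-argument closure plus enough metadata (shape, dtype, description) to plan a conversion, and call the closure only when the tensor's bytes are actually needed. A self-contained sketch of that pattern, with illustrative names that are not part of this patch:

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import Callable

import numpy as np


@dataclass
class LazyArray:
    load: Callable[[], np.ndarray]  # thunk: no I/O happens until it is called
    shape: tuple[int, ...]
    description: str

    def astype(self, dtype) -> LazyArray:
        # Compose a new thunk rather than converting eagerly.
        return LazyArray(lambda: self.load().astype(dtype),
                         self.shape, f"convert({np.dtype(dtype)}) {self.description}")


def from_file(path: str, shape: tuple[int, ...]) -> LazyArray:
    # The file is opened only when .load() finally runs.
    return LazyArray(lambda: np.fromfile(path, dtype=np.float32).reshape(shape),
                     shape, f"file {path}")


arr = np.arange(6, dtype=np.float32)
arr.tofile("/tmp/demo.bin")
lazy = from_file("/tmp/demo.bin", (2, 3)).astype(np.float16)
print(lazy.description)  # metadata only; still no I/O
print(lazy.load())       # reads, reshapes and converts only now
```

Composing thunks this way is what lets the converter de-shard and retype tensors without ever holding more than one materialized tensor in memory.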
- - -@dataclass -class LazyStorageKind: - data_type: DataType - - -@dataclass -class LazyStorage: - load: Callable[[int, int], NDArray] - kind: LazyStorageKind - description: str - - -class LazyUnpickler(pickle.Unpickler): - def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile): - super().__init__(fp) - self.data_base_path = data_base_path - self.zip_file = zip_file - - def persistent_load(self, pid: Any) -> Any: - assert pid[0] == 'storage' - assert isinstance(pid[1], LazyStorageKind) - data_type = pid[1].data_type - filename_stem = pid[2] - filename = f'{self.data_base_path}/{filename_stem}' - info = self.zip_file.getinfo(filename) - - def load(offset: int, elm_count: int) -> NDArray: - dtype = data_type.dtype - with self.zip_file.open(info) as fp: - fp.seek(offset * dtype.itemsize) - size = elm_count * dtype.itemsize - data = fp.read(size) - assert len(data) == size - return np.frombuffer(data, dtype) - description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}' - return LazyStorage(load=load, kind=pid[1], description=description) - - @staticmethod - def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, - requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor: - assert isinstance(storage, LazyStorage) - - def load() -> UnquantizedTensor: - elm_count = stride[0] * size[0] - return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size)) - description = f'pickled storage_offset={storage_offset} in {storage.description}' - return LazyTensor(load, list(size), storage.kind.data_type, description) - - @staticmethod - def rebuild_from_type_v2(func, new_type, args, state): - return func(*args) - - CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = { - # getattr used here as a workaround for mypy not being smart enough to determine - # the staticmethods have a __func__ attribute. 
- ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), - ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'), - ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16), - ('torch', 'HalfStorage'): LazyStorageKind(DT_F16), - ('torch', 'FloatStorage'): LazyStorageKind(DT_F32), - ('torch', 'IntStorage'): LazyStorageKind(DT_I32), - ('torch', 'Tensor'): LazyTensor, - } - - def find_class(self, module: str, name: str) -> Any: - if not module.startswith('torch'): - return super().find_class(module, name) - return self.CLASSES[(module, name)] - - -def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus: - zf = zipfile.ZipFile(outer_fp) - pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')] - assert len(pickle_paths) == 1, pickle_paths - pickle_fp = zf.open(pickle_paths[0], 'r') - unpickler = LazyUnpickler(pickle_fp, - data_base_path=pickle_paths[0][:-4], - zip_file=zf) - model = unpickler.load() - if 'model' in model: model = model['model'] - as_dict = dict(model.items()) - return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None) - - -def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus: - header_size, = struct.unpack('<Q', must_read(fp, 8)) - header: dict[str, dict[str, Any]] = json.loads(must_read(fp, header_size)) - # Use mmap for the actual data to avoid race conditions with the file offset. - mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)) - byte_buf = mapped[8 + header_size:] - - def convert(info: dict[str, Any]) -> LazyTensor: - data_type = SAFETENSORS_DATA_TYPES[info['dtype']] - numpy_dtype = data_type.dtype - shape: list[int] = info['shape'] - begin, end = info['data_offsets'] - assert 0 <= begin <= end <= len(byte_buf) - assert end - begin == math.prod(shape) * numpy_dtype.itemsize - buf = byte_buf[begin:end] - - def load() -> UnquantizedTensor: - return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape)) - description = f'safetensors begin={begin} end={end} type={data_type} path={path}' - return LazyTensor(load, shape, data_type, description) - model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'} - return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None) - - -def must_read(fp: IO[bytes], length: int) -> bytes: - ret = fp.read(length) - if len(ret) < length: - raise EOFError("unexpectedly reached end of file") - return ret - - -@functools.lru_cache(maxsize=None) -def lazy_load_file(path: Path) -> ModelPlus: - fp = open(path, 'rb') - first8 = fp.read(8) - fp.seek(0) - if first8[:2] == b'PK': - # A zip file, i.e. PyTorch format - return lazy_load_torch_file(fp, path) - elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024: - # Most likely a safetensors file: the first 8 bytes hold the JSON header size, which should be small - return lazy_load_safetensors_file(fp, path) - else: - raise ValueError(f"unknown format: {path}") - - -In = TypeVar('In') -Out = TypeVar('Out') - - -def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int, max_workers: int | None = None, use_processpool_executor: bool = False) -> Iterable[Out]: - '''Parallel map, but with backpressure. If the caller doesn't call `next` - fast enough, this will stop calling `func` at some point rather than - letting results pile up in memory. Specifically, there is a max of one - output value buffered per thread.''' - if concurrency < 2: - yield from map(func, iterable) - return # the executor path below is only for concurrency >= 2
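The body of `bounded_parallel_map` continues below. Its core scheme: prime the executor with at most `concurrency` tasks, then top the queue back up by one task per result consumed, so unconsumed outputs never pile up in memory. A stripped-down, editor-written sketch of the same backpressure loop:

```python
from concurrent.futures import ThreadPoolExecutor
from itertools import islice

def bounded_map_sketch(func, items, concurrency: int = 2):
    it = iter(items)
    with ThreadPoolExecutor(max_workers=concurrency) as executor:
        # Prime the queue with at most `concurrency` in-flight tasks.
        futures = [executor.submit(func, x) for x in islice(it, concurrency)]
        while futures:
            yield futures.pop(0).result()   # block on the oldest task
            for x in islice(it, 1):         # refill by one per result consumed
                futures.append(executor.submit(func, x))

print(list(bounded_map_sketch(lambda x: x * x, range(5))))  # [0, 1, 4, 9, 16]
```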
- iterable = iter(iterable) - executor_class: type[ThreadPoolExecutor] | type[ProcessPoolExecutor] - if use_processpool_executor: - executor_class = ProcessPoolExecutor - else: - executor_class = ThreadPoolExecutor - with executor_class(max_workers=max_workers) as executor: - futures: list[concurrent.futures.Future[Out]] = [] - done = False - for _ in range(concurrency): - try: - futures.append(executor.submit(func, next(iterable))) - except StopIteration: - done = True - break - - while futures: - result = futures.pop(0).result() - while not done and len(futures) < concurrency: - try: - futures.append(executor.submit(func, next(iterable))) - except StopIteration: - done = True - break - yield result - - -def check_vocab_size(params: Params, vocab: BaseVocab, pad_vocab: bool = False) -> None: - # Handle special case where the model's vocab size is not set - if params.n_vocab == -1: - raise ValueError( - "The model's vocab size is set to -1 in params.json. Please update it manually." - + (f" Maybe {vocab.vocab_size}?" if isinstance(vocab, Vocab) else ""), - ) - if not isinstance(vocab, Vocab): - return # model has no vocab - - # Check for a vocab size mismatch - if params.n_vocab == vocab.vocab_size: - logger.warning("Ignoring added_tokens.json since model matches vocab size without it.") - return - - if pad_vocab and params.n_vocab > vocab.vocab_size: - pad_count = params.n_vocab - vocab.vocab_size - logger.debug( - f"Padding vocab with {pad_count} token(s) - <dummy00001> through <dummy{pad_count:05}>" - ) - for i in range(1, pad_count + 1): - vocab.added_tokens_dict[f"<dummy{i:05}>"] = -1 - vocab.added_tokens_list.append(f"<dummy{i:05}>") - vocab.vocab_size = params.n_vocab - return - - msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer} has {vocab.vocab_size})." - if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20: - msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})." - if vocab.vocab_size < params.n_vocab: - msg += " Add the --pad-vocab option and try again."
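The `--pad-vocab` branch above fills the gap between the model's declared vocab size and the tokenizer's with numbered `<dummyNNNNN>` placeholders; the function's final error path follows below. Illustrative arithmetic, with sizes invented for the example:

```python
n_vocab, tokenizer_size = 32016, 32000   # hypothetical model/tokenizer pair
pad = [f"<dummy{i:05}>" for i in range(1, n_vocab - tokenizer_size + 1)]
print(len(pad), pad[0], pad[-1])         # 16 <dummy00001> <dummy00016>
```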
- - raise ValueError(msg) - - -class OutputFile: - def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE): - self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess) - - def add_meta_model(self, params: Params, metadata: gguf.Metadata | None) -> None: - # Metadata About The Model And Its Provenence - name = "LLaMA" - if metadata is not None and metadata.name is not None: - name = metadata.name - elif params.path_model is not None: - name = params.path_model.name - elif params.n_ctx == 4096: - # Heuristic detection of LLaMA v2 model - name = "LLaMA v2" - - self.gguf.add_name(name) - - if metadata is not None: - if metadata.author is not None: - self.gguf.add_author(metadata.author) - if metadata.version is not None: - self.gguf.add_version(metadata.version) - if metadata.organization is not None: - self.gguf.add_organization(metadata.organization) - - if metadata.finetune is not None: - self.gguf.add_finetune(metadata.finetune) - if metadata.basename is not None: - self.gguf.add_basename(metadata.basename) - - if metadata.description is not None: - self.gguf.add_description(metadata.description) - if metadata.quantized_by is not None: - self.gguf.add_quantized_by(metadata.quantized_by) - - if metadata.size_label is not None: - self.gguf.add_size_label(metadata.size_label) - - if metadata.license is not None: - self.gguf.add_license(metadata.license) - if metadata.license_name is not None: - self.gguf.add_license_name(metadata.license_name) - if metadata.license_link is not None: - self.gguf.add_license_link(metadata.license_link) - - if metadata.url is not None: - self.gguf.add_url(metadata.url) - if metadata.doi is not None: - self.gguf.add_doi(metadata.doi) - if metadata.uuid is not None: - self.gguf.add_uuid(metadata.uuid) - if metadata.repo_url is not None: - self.gguf.add_repo_url(metadata.repo_url) - - if metadata.source_url is not None: - self.gguf.add_source_url(metadata.source_url) - if metadata.source_doi is not None: - self.gguf.add_source_doi(metadata.source_doi) - if metadata.source_uuid is not None: - self.gguf.add_source_uuid(metadata.source_uuid) - if metadata.source_repo_url is not None: - self.gguf.add_source_repo_url(metadata.source_repo_url) - - if metadata.base_models is not None: - self.gguf.add_base_model_count(len(metadata.base_models)) - for key, base_model_entry in enumerate(metadata.base_models): - if "name" in base_model_entry: - self.gguf.add_base_model_name(key, base_model_entry["name"]) - if "author" in base_model_entry: - self.gguf.add_base_model_author(key, base_model_entry["author"]) - if "version" in base_model_entry: - self.gguf.add_base_model_version(key, base_model_entry["version"]) - if "organization" in base_model_entry: - self.gguf.add_base_model_organization(key, base_model_entry["organization"]) - if "url" in base_model_entry: - self.gguf.add_base_model_url(key, base_model_entry["url"]) - if "doi" in base_model_entry: - self.gguf.add_base_model_doi(key, base_model_entry["doi"]) - if "uuid" in base_model_entry: - self.gguf.add_base_model_uuid(key, base_model_entry["uuid"]) - if "repo_url" in base_model_entry: - self.gguf.add_base_model_repo_url(key, base_model_entry["repo_url"]) - - if metadata.tags is not None: - self.gguf.add_tags(metadata.tags) - if metadata.languages is not None: - self.gguf.add_languages(metadata.languages) - if metadata.datasets is not None: - self.gguf.add_datasets(metadata.datasets) - - def add_meta_arch(self, params: Params) -> None: - # Metadata About The 
Neural Architecture Itself - self.gguf.add_vocab_size(params.n_vocab) - self.gguf.add_context_length(params.n_ctx) - self.gguf.add_embedding_length(params.n_embd) - self.gguf.add_block_count(params.n_layer) - self.gguf.add_feed_forward_length(params.n_ff) - self.gguf.add_rope_dimension_count(params.n_embd // params.n_head) - self.gguf.add_head_count (params.n_head) - self.gguf.add_head_count_kv (params.n_head_kv) - - if params.n_experts: - self.gguf.add_expert_count(params.n_experts) - - if params.n_experts_used: - self.gguf.add_expert_used_count(params.n_experts_used) - - if params.f_norm_eps: - self.gguf.add_layer_norm_rms_eps(params.f_norm_eps) - else: - raise ValueError('f_norm_eps is None') - - if params.f_rope_freq_base is not None: - self.gguf.add_rope_freq_base(params.f_rope_freq_base) - - if params.rope_scaling_type: - assert params.f_rope_scale is not None - self.gguf.add_rope_scaling_type(params.rope_scaling_type) - self.gguf.add_rope_scaling_factor(params.f_rope_scale) - - if params.n_ctx_orig is not None: - self.gguf.add_rope_scaling_orig_ctx_len(params.n_ctx_orig) - - if params.rope_finetuned is not None: - self.gguf.add_rope_scaling_finetuned(params.rope_finetuned) - - if params.ftype is not None: - self.gguf.add_file_type(params.ftype) - - def extract_vocabulary_from_model(self, vocab: Vocab) -> tuple[list[bytes], list[float], list[gguf.TokenType]]: - tokens = [] - scores = [] - toktypes = [] - - # NOTE: `all_tokens` returns the base vocabulary and added tokens - for text, score, toktype in vocab.all_tokens(): - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - assert len(tokens) == vocab.vocab_size - - return tokens, scores, toktypes - - def add_meta_vocab(self, vocab: Vocab) -> None: - # Ensure that tokenizer_model is added to the GGUF model - self.gguf.add_tokenizer_model(vocab.tokenizer_model) - - # Extract model vocabulary for model conversion - tokens, scores, toktypes = self.extract_vocabulary_from_model(vocab) - - # Add extracted token information for model conversion - self.gguf.add_token_list(tokens) - self.gguf.add_token_scores(scores) - self.gguf.add_token_types(toktypes) - - def add_meta_special_vocab(self, svocab: gguf.SpecialVocab) -> None: - svocab.add_to_gguf(self.gguf) - - def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: - n_elements = int(np.prod(tensor.shape)) - raw_dtype = getattr(tensor.data_type, 'ggml_type', None) - data_type = getattr(tensor.data_type, 'quantized_type', None) or tensor.data_type.dtype - data_nbytes = tensor.data_type.elements_to_bytes(n_elements) - self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes, raw_dtype=raw_dtype) - - def write_meta(self) -> None: - self.gguf.write_header_to_file() - self.gguf.write_kv_data_to_file() - - def write_tensor_info(self) -> None: - self.gguf.write_ti_data_to_file() - - def write_tensor_data(self, ftype: GGMLFileType, model: LazyModel, concurrency: int) -> None: - ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency=concurrency) - if ftype == GGMLFileType.MostlyQ8_0: - ndarrays = bounded_parallel_map( - OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency, - use_processpool_executor=True, - ) - else: - ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner) - - start = time.time() - for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): - elapsed = time.time() - start - size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) - padi = 
len(str(len(model))) - logger.info( - f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}" - ) - self.gguf.write_tensor_data(ndarray) - - def close(self) -> None: - self.gguf.close() - - @staticmethod - def write_vocab_only( - fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab, - endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: gguf.Metadata | None = None, - ) -> None: - check_vocab_size(params, vocab, pad_vocab=pad_vocab) - - of = OutputFile(fname_out, endianess=endianess) - - # meta data - of.add_meta_model(params, metadata) - of.add_meta_arch(params) - of.add_meta_vocab(vocab) - of.add_meta_special_vocab(svocab) - - of.write_meta() - - of.close() - - @staticmethod - def do_item(item: tuple[str, LazyTensor]) -> tuple[DataType, NDArray]: - name, lazy_tensor = item - tensor = lazy_tensor.load().to_ggml() - return (lazy_tensor.data_type, tensor.ndarray) - - @staticmethod - def maybe_do_quantize(item: tuple[DataType, NDArray]) -> NDArray: - dt, arr = item - if not isinstance(dt, QuantizedDataType): - return arr - return dt.quantize(arr) - - @staticmethod - def write_all( - fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab, - concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, - pad_vocab: bool = False, - metadata: gguf.Metadata | None = None, - ) -> None: - check_vocab_size(params, vocab, pad_vocab=pad_vocab) - - of = OutputFile(fname_out, endianess=endianess) - - # meta data - of.add_meta_model(params, metadata) - of.add_meta_arch(params) - if isinstance(vocab, Vocab): - of.add_meta_vocab(vocab) - of.add_meta_special_vocab(svocab) - else: # NoVocab - of.gguf.add_tokenizer_model(vocab.tokenizer_model) - - # tensor info - for name, lazy_tensor in model.items(): - of.add_tensor_info(name, lazy_tensor) - - of.write_meta() - of.write_tensor_info() - - # tensor data - of.write_tensor_data(ftype, model, concurrency) - - of.close() - - -def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileType: - wq_type = model[gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0) + ".weight"].data_type - - if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)): - return GGMLFileType.AllF32 - if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): - return GGMLFileType.MostlyF16 - if output_type_str == "q8_0": - return GGMLFileType.MostlyQ8_0 - - name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()} - - raise ValueError(f"Unexpected combination of types: {name_to_type}") - - -def per_model_weight_count_estimation(tensors: Iterable[tuple[str, LazyTensor]]) -> tuple[int, int, int]: - total_params = 0 - shared_params = 0 - expert_params = 0 - - for name, lazy_tensor in tensors: - # We don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): - continue - - # Got A Tensor - sum_weights_in_tensor: int = 1 - - # Tensor Volume - for dim in lazy_tensor.shape: - sum_weights_in_tensor *= dim - - if ".experts." in name: - if ".experts.0." 
in name: - expert_params += sum_weights_in_tensor - else: - shared_params += sum_weights_in_tensor - - total_params += sum_weights_in_tensor - - return total_params, shared_params, expert_params - - -def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: - return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) - for (name, tensor) in model.items()} - - -def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) -> LazyModel: - tmap = gguf.TensorNameMap(ARCH, params.n_layer) - should_skip = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, [])) - - tmp = model - - # merge experts into one tensor - if params.n_experts and params.n_experts > 0: - for i_l in range(params.n_layer): - for w in range(1, 4): - experts = [] - for e in range(params.n_experts): - if f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight" in model: - experts.append(model[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"]) - del tmp[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"] - elif f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight" in model: - experts.append(model[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"]) - del tmp[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"] - else: - raise ValueError(f"Expert tensor not found: layers.{i_l}.feed_forward.experts.{e}.w{w}.weight") - tmp[f"layers.{i_l}.feed_forward.experts.w{w}.weight"] = pack_experts_lazy(experts) - - # HF models permut or pack some of the tensors, so we need to undo that - for i in itertools.count(): - if f"model.layers.{i}.self_attn.q_proj.weight" in model: - logger.debug(f"Permuting layer {i}") - tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head) - tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv) - # tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] - elif f"model.layers.{i}.self_attn.W_pack.weight" in model: - logger.debug(f"Unpacking and permuting layer {i}") - tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head) - tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv) - tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2) - del tmp[f"model.layers.{i}.self_attn.W_pack.weight"] - else: - break - - out: LazyModel = {} - for name, lazy_tensor in model.items(): - tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None) - if name_new is None: - if skip_unknown: - logger.warning(f"Unexpected tensor name: {name} - skipping") - continue - raise ValueError(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)") - - if tensor_type in should_skip: - logger.debug(f"skipping tensor {name_new}") - continue - - logger.debug(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}") - out[name_new] = lazy_tensor - - return out - - -def nth_multifile_path(path: Path, n: int) -> Path | None: - '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return - the nth path in the model. 
- ''' - # Support the following patterns: - patterns = [ - # - x.00.pth, x.01.pth, etc. - (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'), - # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc. - (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'), - # x.bin, x.bin.1, etc. - (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}') - ] - for regex, replacement in patterns: - if re.search(regex, path.name): - new_path = path.with_name(re.sub(regex, replacement, path.name)) - if new_path.exists(): - return new_path - return None - - -def find_multifile_paths(path: Path) -> list[Path]: - '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return - the whole list of paths in the model. - ''' - ret: list[Path] = [] - for i in itertools.count(): - nth_path = nth_multifile_path(path, i) - if nth_path is None: - break - ret.append(nth_path) - if not ret: - # No matches. This should only happen if the file was named, e.g., - # foo.0, and there was no file named foo. Oh well, try to process it - # as a single file. - return [path] - return ret - - -def load_some_model(path: Path) -> ModelPlus: - '''Load a model of any supported format.''' - # Be extra-friendly and accept either a file or a directory: - if path.is_dir(): - # Check if it's a set of safetensors files first - globs = ["model-00001-of-*.safetensors", "model.safetensors", "consolidated.safetensors"] - files = [file for glob in globs for file in path.glob(glob)] - if not files: - # Try the PyTorch patterns too, with lower priority - globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"] - files = [file for glob in globs for file in path.glob(glob)] - if not files: - raise FileNotFoundError(f"Can't find model in directory {path}") - if len(files) > 1: - raise ValueError(f"Found multiple models in {path}, not sure which to pick: {files}") - path = files[0] - - paths = find_multifile_paths(path) - models_plus: list[ModelPlus] = [] - for path in paths: - logger.info(f"Loading model file {path}") - models_plus.append(lazy_load_file(path)) - - model_plus = merge_multifile_models(models_plus) - return model_plus - - -class VocabFactory: - _VOCAB_CLASSES: list[type[Vocab]] = [SentencePieceVocab, BpeVocab, LlamaHfVocab] - - def __init__(self, path: Path): - self.path = path - - def _create_special_vocab(self, vocab: BaseVocab, model_parent_path: Path) -> gguf.SpecialVocab: - load_merges = vocab.name == "bpe" - n_vocab = vocab.vocab_size if isinstance(vocab, Vocab) else None - return gguf.SpecialVocab( - model_parent_path, - load_merges=load_merges, - special_token_types=None, # Predetermined or passed as a parameter - n_vocab=n_vocab, - ) - - def _create_vocab_by_path(self, vocab_types: list[str]) -> Vocab: - vocab_classes: dict[str, type[Vocab]] = {cls.name: cls for cls in self._VOCAB_CLASSES} - selected_vocabs: dict[str, type[Vocab]] = {} - for vtype in vocab_types: - try: - selected_vocabs[vtype] = vocab_classes[vtype] - except KeyError: - raise ValueError(f"Unsupported vocabulary type {vtype}") from None - - for vtype, cls in selected_vocabs.items(): - try: - vocab = cls(self.path) - break - except FileNotFoundError: - pass # ignore unavailable tokenizers - else: - raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}") - - logger.info(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}") - return vocab - - def load_vocab(self, vocab_types: list[str] | None, model_parent_path: Path) -> tuple[BaseVocab, gguf.SpecialVocab]: - vocab: BaseVocab - if vocab_types is None: - vocab 
= NoVocab() - else: - vocab = self._create_vocab_by_path(vocab_types) - # FIXME: Respect --vocab-dir? - special_vocab = self._create_special_vocab( - vocab, - model_parent_path, - ) - return vocab, special_vocab - - -def default_convention_outfile(file_type: GGMLFileType, expert_count: int | None, model_params_count: tuple[int, int, int], metadata: gguf.Metadata) -> str: - name = metadata.name if metadata.name is not None else None - basename = metadata.basename if metadata.basename is not None else None - finetune = metadata.finetune if metadata.finetune is not None else None - version = metadata.version if metadata.version is not None else None - size_label = metadata.size_label if metadata.size_label is not None else gguf.size_label(*model_params_count, expert_count=expert_count or 0) - - output_type = { - GGMLFileType.AllF32: "F32", - GGMLFileType.MostlyF16: "F16", - GGMLFileType.MostlyQ8_0: "Q8_0", - }[file_type] - - return gguf.naming_convention(name, basename, finetune, version, size_label, output_type) - - -def default_outfile(model_paths: list[Path], file_type: GGMLFileType, expert_count: int | None, model_params_count: tuple[int, int, int], metadata: gguf.Metadata) -> Path: - default_filename = default_convention_outfile(file_type, expert_count, model_params_count, metadata) - ret = model_paths[0].parent / f"{default_filename}.gguf" - if ret in model_paths: - logger.error( - f"Error: Default output path ({ret}) would overwrite the input. " - "Please explicitly specify a path using --outfile.") - sys.exit(1) - return ret - - -def do_dump_model(model_plus: ModelPlus) -> None: - print(f"model_plus.paths = {model_plus.paths!r}") # noqa: NP100 - print(f"model_plus.format = {model_plus.format!r}") # noqa: NP100 - print(f"model_plus.vocab = {model_plus.vocab!r}") # noqa: NP100 - for name, lazy_tensor in model_plus.model.items(): - print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") # noqa: NP100 - - -def main(args_in: list[str] | None = None) -> None: - output_choices = ["f32", "f16"] - if np.uint32(1) == np.uint32(1).newbyteorder("<"): - # We currently only support Q8_0 output on little endian systems. 
- output_choices.append("q8_0") - parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file") - parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") - parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") - parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--no-vocab", action="store_true", help="store model without the vocab") - parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") - parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") - parser.add_argument("--vocab-type", help="vocab types to try in order, choose from 'spm', 'bpe', 'hfft' (default: spm,hfft)", default="spm,hfft") - parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") - parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") - parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default=DEFAULT_CONCURRENCY) - parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine") - parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides") - parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing") - parser.add_argument("--verbose", action="store_true", help="increase output verbosity") - parser.add_argument("--metadata", type=Path, help="Specify the path for an authorship metadata override file") - parser.add_argument("--get-outfile", action="store_true", help="get calculated default outfile name") - parser.add_argument("--model-name", type=str, default=None, help="name of the model") - - args = parser.parse_args(args_in) - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - elif args.dump_single or args.dump or args.get_outfile: - # Avoid printing anything besides the dump output - logging.basicConfig(level=logging.WARNING) - else: - logging.basicConfig(level=logging.INFO) - - model_name = args.model_name - dir_model = args.model - - metadata = gguf.Metadata.load(args.metadata, dir_model, model_name) - - if args.get_outfile: - model_plus = load_some_model(dir_model) - params = Params.load(model_plus) - model = convert_model_names(model_plus.model, params, args.skip_unknown) - model_params_count = per_model_weight_count_estimation(model_plus.model.items()) - ftype = pick_output_type(model, args.outtype) - - if (metadata is None or metadata.name is None) and params.path_model is not None: - metadata.name = params.path_model.name - - print(f"{default_convention_outfile(ftype, params.n_experts, model_params_count, metadata)}") # noqa: NP100 - return - - if args.no_vocab and args.vocab_only: - raise ValueError("--vocab-only does not make sense with --no-vocab") - - if args.dump_single: - model_plus = lazy_load_file(dir_model) - do_dump_model(model_plus) - return - - if not args.vocab_only: - model_plus = load_some_model(dir_model) - else: - model_plus = ModelPlus(model = {}, paths = [dir_model / 'dummy'], format 
= 'none', vocab = None) - - if args.dump: - do_dump_model(model_plus) - return - - endianess = gguf.GGUFEndian.LITTLE - if args.big_endian: - endianess = gguf.GGUFEndian.BIG - - params = None - if args.pad_vocab or not args.vocab_only: - params = Params.load(model_plus) - if params.n_ctx == -1: - if args.ctx is None: - msg = """\ - The model doesn't have a context size, and you didn't specify one with --ctx - Please specify one with --ctx: - - LLaMA v1: --ctx 2048 - - LLaMA v2: --ctx 4096""" - parser.error(textwrap.dedent(msg)) - params.n_ctx = args.ctx - - if args.outtype: - params.ftype = { - "f32": GGMLFileType.AllF32, - "f16": GGMLFileType.MostlyF16, - "q8_0": GGMLFileType.MostlyQ8_0, - }[args.outtype] - - logger.info(f"params = {params}") - - model_parent_path = model_plus.paths[0].parent - vocab_path = Path(args.vocab_dir or dir_model or model_parent_path) - vocab_factory = VocabFactory(vocab_path) - vocab_types = None if args.no_vocab else args.vocab_type.split(",") - vocab, special_vocab = vocab_factory.load_vocab(vocab_types, model_parent_path) - - if args.vocab_only: - assert isinstance(vocab, Vocab) - if not args.outfile: - raise ValueError("need --outfile if using --vocab-only") - outfile = args.outfile - if params is None: - params = Params( - n_vocab = vocab.vocab_size, - n_embd = 1, - n_layer = 1, - n_ctx = 1, - n_ff = 1, - n_head = 1, - n_head_kv = 1, - f_norm_eps = 1e-5, - ) - OutputFile.write_vocab_only(outfile, params, vocab, special_vocab, - endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) - logger.info(f"Wrote {outfile}") - return - - if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab: - vocab = model_plus.vocab - - assert params is not None - - if metadata.name is None and params.path_model is not None: - metadata.name = params.path_model.name - - model_params_count = per_model_weight_count_estimation(model_plus.model.items()) - logger.info(f"model parameters count : {model_params_count} ({gguf.model_weight_count_rounded_notation(model_params_count[0])})") - - logger.info(f"Vocab info: {vocab}") - logger.info(f"Special vocab info: {special_vocab}") - model = model_plus.model - model = convert_model_names(model, params, args.skip_unknown) - ftype = pick_output_type(model, args.outtype) - model = convert_to_output_type(model, ftype) - outfile = args.outfile or default_outfile(model_plus.paths, ftype, params.n_experts, model_params_count, metadata=metadata) - - metadata.size_label = gguf.size_label(*model_params_count, expert_count=params.n_experts or 0) - - params.ftype = ftype - logger.info(f"Writing {outfile}, format {ftype}") - - OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, - concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) - logger.info(f"Wrote {outfile}") - - -if __name__ == '__main__': - main() diff --git a/examples/json_schema_pydantic_example.py b/examples/json_schema_pydantic_example.py deleted file mode 100644 index 19c0bdb5b..000000000 --- a/examples/json_schema_pydantic_example.py +++ /dev/null @@ -1,82 +0,0 @@ -# Usage: -#! ./llama-server -m some-model.gguf & -#! pip install pydantic -#! 
python json_schema_pydantic_example.py - -from pydantic import BaseModel, Field, TypeAdapter -from annotated_types import MinLen -from typing import Annotated, List, Optional -import json, requests - -if True: - - def create_completion(*, response_model=None, endpoint="http://localhost:8080/v1/chat/completions", messages, **kwargs): - ''' - Creates a chat completion using an OpenAI-compatible endpoint w/ JSON schema support - (llama.cpp server, llama-cpp-python, Anyscale / Together...) - - The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below) - ''' - response_format = None - type_adapter = None - - if response_model: - type_adapter = TypeAdapter(response_model) - schema = type_adapter.json_schema() - messages = [{ - "role": "system", - "content": f"You respond in JSON format with the following schema: {json.dumps(schema, indent=2)}" - }] + messages - response_format={"type": "json_object", "schema": schema} - - data = requests.post(endpoint, headers={"Content-Type": "application/json"}, - json=dict(messages=messages, response_format=response_format, **kwargs)).json() - if 'error' in data: - raise Exception(data['error']['message']) - - content = data["choices"][0]["message"]["content"] - return type_adapter.validate_json(content) if type_adapter else content - -else: - - # This alternative branch uses Instructor + OpenAI client lib. - # Instructor support streamed iterable responses, retry & more. - # (see https://python.useinstructor.com/) - #! pip install instructor openai - import instructor, openai - client = instructor.patch( - openai.OpenAI(api_key="123", base_url="http://localhost:8080"), - mode=instructor.Mode.JSON_SCHEMA) - create_completion = client.chat.completions.create - - -if __name__ == '__main__': - - class QAPair(BaseModel): - class Config: - extra = 'forbid' # triggers additionalProperties: false in the JSON schema - question: str - concise_answer: str - justification: str - stars: Annotated[int, Field(ge=1, le=5)] - - class PyramidalSummary(BaseModel): - class Config: - extra = 'forbid' # triggers additionalProperties: false in the JSON schema - title: str - summary: str - question_answers: Annotated[List[QAPair], MinLen(2)] - sub_sections: Optional[Annotated[List['PyramidalSummary'], MinLen(2)]] - - print("# Summary\n", create_completion( - model="...", - response_model=PyramidalSummary, - messages=[{ - "role": "user", - "content": f""" - You are a highly efficient corporate document summarizer. - Create a pyramidal summary of an imaginary internal document about our company processes - (starting high-level, going down to each sub sections). - Keep questions short, and answers even shorter (trivia / quizz style). - """ - }])) diff --git a/examples/json_schema_to_grammar.py b/examples/json_schema_to_grammar.py deleted file mode 100755 index a8779bf3b..000000000 --- a/examples/json_schema_to_grammar.py +++ /dev/null @@ -1,811 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import argparse -import itertools -import json -import re -import sys -from typing import Any, List, Optional, Set, Tuple, Union - -def _build_repetition(item_rule, min_items, max_items, separator_rule=None): - - if min_items == 0 and max_items == 1: - return f'{item_rule}?' 
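`_build_repetition` continues below; it maps JSON-schema min/max item counts onto GBNF repetition operators, recursing once to handle separator rules. A few editor-derived input/output pairs for an item rule named `item`:

```python
assert _build_repetition('item', 0, 1) == 'item?'
assert _build_repetition('item', 1, None) == 'item+'
assert _build_repetition('item', 0, None) == 'item*'
assert _build_repetition('item', 2, 4) == 'item{2,4}'
# With a separator, the repeated tail is grouped with the separator rule:
assert _build_repetition('item', 1, None, separator_rule='","') == 'item ("," item)*'
```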
- - if not separator_rule: - if min_items == 1 and max_items is None: - return f'{item_rule}+' - elif min_items == 0 and max_items is None: - return f'{item_rule}*' - else: - return f'{item_rule}{{{min_items},{max_items if max_items is not None else ""}}}' - - result = item_rule + ' ' + _build_repetition(f'({separator_rule} {item_rule})', min_items - 1 if min_items > 0 else 0, max_items - 1 if max_items is not None else None) - return f'({result})?' if min_items == 0 else result - -def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], out: list, decimals_left: int = 16, top_level: bool = True): - has_min = min_value != None - has_max = max_value != None - - def digit_range(from_char: str, to_char: str): - out.append("[") - if from_char == to_char: - out.append(from_char) - else: - out.append(from_char) - out.append("-") - out.append(to_char) - out.append("]") - - def more_digits(min_digits: int, max_digits: int): - out.append("[0-9]") - if min_digits == max_digits and min_digits == 1: - return - out.append("{") - out.append(str(min_digits)) - if max_digits != min_digits: - out.append(",") - if max_digits != sys.maxsize: - out.append(str(max_digits)) - out.append("}") - - def uniform_range(from_str: str, to_str: str): - i = 0 - while i < len(from_str) and from_str[i] == to_str[i]: - i += 1 - if i > 0: - out.append("\"") - out.append(from_str[:i]) - out.append("\"") - if i < len(from_str): - if i > 0: - out.append(" ") - sub_len = len(from_str) - i - 1 - if sub_len > 0: - from_sub = from_str[i+1:] - to_sub = to_str[i+1:] - sub_zeros = "0" * sub_len - sub_nines = "9" * sub_len - - to_reached = False - out.append("(") - if from_sub == sub_zeros: - digit_range(from_str[i], chr(ord(to_str[i]) - 1)) - out.append(" ") - more_digits(sub_len, sub_len) - else: - out.append("[") - out.append(from_str[i]) - out.append("] ") - out.append("(") - uniform_range(from_sub, sub_nines) - out.append(")") - if ord(from_str[i]) < ord(to_str[i]) - 1: - out.append(" | ") - if to_sub == sub_nines: - digit_range(chr(ord(from_str[i]) + 1), to_str[i]) - to_reached = True - else: - digit_range(chr(ord(from_str[i]) + 1), chr(ord(to_str[i]) - 1)) - out.append(" ") - more_digits(sub_len, sub_len) - if not to_reached: - out.append(" | ") - digit_range(to_str[i], to_str[i]) - out.append(" ") - uniform_range(sub_zeros, to_sub) - out.append(")") - else: - out.append("[") - out.append(from_str[i]) - out.append("-") - out.append(to_str[i]) - out.append("]") - - if has_min and has_max: - if min_value < 0 and max_value < 0: - out.append("\"-\" (") - _generate_min_max_int(-max_value, -min_value, out, decimals_left, top_level=True) - out.append(")") - return - - if min_value < 0: - out.append("\"-\" (") - _generate_min_max_int(0, -min_value, out, decimals_left, top_level=True) - out.append(") | ") - min_value = 0 - - min_s = str(min_value) - max_s = str(max_value) - min_digits = len(min_s) - max_digits = len(max_s) - - for digits in range(min_digits, max_digits): - uniform_range(min_s, "9" * digits) - min_s = "1" + "0" * digits - out.append(" | ") - uniform_range(min_s, max_s) - return - - less_decimals = max(decimals_left - 1, 1) - - if has_min: - if min_value < 0: - out.append("\"-\" (") - _generate_min_max_int(None, -min_value, out, decimals_left, top_level=False) - out.append(") | [0] | [1-9] ") - more_digits(0, decimals_left - 1) - elif min_value == 0: - if top_level: - out.append("[0] | [1-9] ") - more_digits(0, less_decimals) - else: - more_digits(1, decimals_left) - elif min_value <= 9: - c = 
str(min_value) - range_start = '1' if top_level else '0' - if c > range_start: - digit_range(range_start, chr(ord(c) - 1)) - out.append(" ") - more_digits(1, less_decimals) - out.append(" | ") - digit_range(c, "9") - out.append(" ") - more_digits(0, less_decimals) - else: - min_s = str(min_value) - length = len(min_s) - c = min_s[0] - - if c > "1": - digit_range("1" if top_level else "0", chr(ord(c) - 1)) - out.append(" ") - more_digits(length, less_decimals) - out.append(" | ") - digit_range(c, c) - out.append(" (") - _generate_min_max_int(int(min_s[1:]), None, out, less_decimals, top_level=False) - out.append(")") - if c < "9": - out.append(" | ") - digit_range(chr(ord(c) + 1), "9") - out.append(" ") - more_digits(length - 1, less_decimals) - return - - if has_max: - if max_value >= 0: - if top_level: - out.append("\"-\" [1-9] ") - more_digits(0, less_decimals) - out.append(" | ") - _generate_min_max_int(0, max_value, out, decimals_left, top_level=True) - else: - out.append("\"-\" (") - _generate_min_max_int(-max_value, None, out, decimals_left, top_level=False) - out.append(")") - return - - raise RuntimeError("At least one of min_value or max_value must be set") - -class BuiltinRule: - def __init__(self, content: str, deps: list | None = None): - self.content = content - self.deps = deps or [] - -# Constraining spaces to prevent model "running away". -SPACE_RULE = '| " " | "\\n" [ \\t]{0,20}' - -PRIMITIVE_RULES = { - 'boolean' : BuiltinRule('("true" | "false") space', []), - 'decimal-part' : BuiltinRule('[0-9]{1,16}', []), - 'integral-part': BuiltinRule('[0] | [1-9] [0-9]{0,15}', []), - 'number' : BuiltinRule('("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', ['integral-part', 'decimal-part']), - 'integer' : BuiltinRule('("-"? integral-part) space', ['integral-part']), - 'value' : BuiltinRule('object | array | string | number | boolean | null', ['object', 'array', 'string', 'number', 'boolean', 'null']), - 'object' : BuiltinRule('"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', ['string', 'value']), - 'array' : BuiltinRule('"[" space ( value ("," space value)* )? "]" space', ['value']), - 'uuid' : BuiltinRule(r'"\"" [0-9a-fA-F]{8} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{12} "\"" space', []), - 'char' : BuiltinRule(r'[^"\\\x7F\x00-\x1F] | [\\] (["\\bfnrt] | "u" [0-9a-fA-F]{4})', []), - 'string' : BuiltinRule(r'"\"" char* "\"" space', ['char']), - 'null' : BuiltinRule('"null" space', []), -} - -# TODO: support "uri", "email" string formats -STRING_FORMAT_RULES = { - 'date' : BuiltinRule('[0-9]{4} "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )', []), - 'time' : BuiltinRule('([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9]{3} )? 
( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', []), - 'date-time' : BuiltinRule('date "T" time', ['date', 'time']), - 'date-string' : BuiltinRule('"\\"" date "\\"" space', ['date']), - 'time-string' : BuiltinRule('"\\"" time "\\"" space', ['time']), - 'date-time-string': BuiltinRule('"\\"" date-time "\\"" space', ['date-time']), -} - -DOTALL = '[\\U00000000-\\U0010FFFF]' -DOT = '[^\\x0A\\x0D]' - -RESERVED_NAMES = set(["root", "dot", *PRIMITIVE_RULES.keys(), *STRING_FORMAT_RULES.keys()]) - -INVALID_RULE_CHARS_RE = re.compile(r'[^a-zA-Z0-9-]+') -GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]') -GRAMMAR_RANGE_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"\]\-\\]') -GRAMMAR_LITERAL_ESCAPES = {'\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]'} - -NON_LITERAL_SET = set('|.()[]{}*+?') -ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set('^$.[]()|{}*+?') - - -class SchemaConverter: - def __init__(self, *, prop_order, allow_fetch, dotall, raw_pattern): - self._prop_order = prop_order - self._allow_fetch = allow_fetch - self._dotall = dotall - self._raw_pattern = raw_pattern - self._rules = { - 'space': SPACE_RULE, - } - self._refs = {} - self._refs_being_resolved = set() - - def _format_literal(self, literal): - escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub( - lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)) or m.group(0), literal - ) - return f'"{escaped}"' - - def not_literal(self, literal: str, dotall: bool = True, maybe_escaped_underscores = False) -> str: - ''' - not_literal('a') -> '[^a]' - not_literal('abc') -> '([^a] | "a" ([^b] | "b" ([^c])?)?)?' - ''' - assert len(literal) > 0, 'Empty literal not supported' - def recurse(i: int): - c = literal[i] - if maybe_escaped_underscores and c == '_': - yield f'[^{c}\\\\]' - yield ' | ' - yield f'"\\\\"? "{c}"' - else: - yield f'[^{c}]' - if i < len(literal) - 1: - yield ' | ' - yield self._format_literal(c) - yield ' (' - yield from recurse(i + 1) - yield ')?' 
- - return ''.join(('(', *recurse(0), ')')) - - def _not_strings(self, strings): - class TrieNode: - def __init__(self): - self.children = {} - self.is_end_of_string = False - - def insert(self, string): - node = self - for c in string: - node = node.children.setdefault(c, TrieNode()) - node.is_end_of_string = True - - trie = TrieNode() - for s in strings: - trie.insert(s) - - char_rule = self._add_primitive('char', PRIMITIVE_RULES['char']) - out = ['["] ( '] - - def visit(node): - rejects = [] - first = True - for c in sorted(node.children.keys()): - child = node.children[c] - rejects.append(c) - if first: - first = False - else: - out.append(' | ') - out.append(f'[{c}]') - if child.children: - out.append(f' (') - visit(child) - out.append(')') - elif child.is_end_of_string: - out.append(f' {char_rule}+') - if node.children: - if not first: - out.append(' | ') - out.append(f'[^"{"".join(rejects)}] {char_rule}*') - visit(trie) - - out.append(f' ){"" if trie.is_end_of_string else "?"} ["] space') - return ''.join(out) - - def _add_rule(self, name, rule): - esc_name = INVALID_RULE_CHARS_RE.sub('-', name) - if esc_name not in self._rules or self._rules[esc_name] == rule: - key = esc_name - else: - i = 0 - while f'{esc_name}{i}' in self._rules and self._rules[f'{esc_name}{i}'] != rule: - i += 1 - key = f'{esc_name}{i}' - self._rules[key] = rule - return key - - def resolve_refs(self, schema: dict, url: str): - ''' - Resolves all $ref fields in the given schema, fetching any remote schemas, - replacing $ref with absolute reference URL and populating self._refs with the - respective referenced (sub)schema dictionaries. - ''' - def visit(n: dict): - if isinstance(n, list): - return [visit(x) for x in n] - elif isinstance(n, dict): - ref = n.get('$ref') - if ref is not None and ref not in self._refs: - if ref.startswith('https://'): - assert self._allow_fetch, 'Fetching remote schemas is not allowed (use --allow-fetch for force)' - import requests - - frag_split = ref.split('#') - base_url = frag_split[0] - - target = self._refs.get(base_url) - if target is None: - target = self.resolve_refs(requests.get(ref).json(), base_url) - self._refs[base_url] = target - - if len(frag_split) == 1 or frag_split[-1] == '': - return target - elif ref.startswith('#/'): - target = schema - ref = f'{url}{ref}' - n['$ref'] = ref - else: - raise ValueError(f'Unsupported ref {ref}') - - for sel in ref.split('#')[-1].split('/')[1:]: - assert target is not None and sel in target, f'Error resolving ref {ref}: {sel} not in {target}' - target = target[sel] - - self._refs[ref] = target - else: - for v in n.values(): - visit(v) - - return n - return visit(schema) - - def _generate_union_rule(self, name, alt_schemas): - return ' | '.join(( - self.visit(alt_schema, f'{name}{"-" if name else "alternative-"}{i}') - for i, alt_schema in enumerate(alt_schemas) - )) - - def _visit_pattern(self, pattern, name): - ''' - Transforms a regular expression pattern into a GBNF rule. - - Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions - Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md - - Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers. - - Mostly a 1:1 translation, except for {x} / {x,} / {x,y} quantifiers for which - we define sub-rules to keep the output lean. 
- ''' - - assert pattern.startswith('^') and pattern.endswith('$'), 'Pattern must start with "^" and end with "$"' - pattern = pattern[1:-1] - sub_rule_ids = {} - - i = 0 - length = len(pattern) - - def to_rule(s: tuple[str, bool]) -> str: - (txt, is_literal) = s - return "\"" + txt + "\"" if is_literal else txt - - def transform() -> tuple[str, bool]: - ''' - Parse a unit at index i (advancing it), and return its string representation + whether it's a literal. - ''' - nonlocal i - nonlocal pattern - nonlocal sub_rule_ids - - start = i - # For each component of this sequence, store its string representation and whether it's a literal. - # We only need a flat structure here to apply repetition operators to the last item, and - # to merge literals at the end (we're parsing grouped ( sequences ) recursively and don't treat '|' specially; - # GBNF's syntax is luckily very close to regular expressions!) - seq: list[tuple[str, bool]] = [] - - def get_dot(): - if self._dotall: - rule = DOTALL - else: - # Accept any character... except \n and \r line break chars (\x0A and \x0D) - rule = DOT - return self._add_rule(f'dot', rule) - - def join_seq(): - nonlocal seq - ret = [] - for is_literal, g in itertools.groupby(seq, lambda x: x[1]): - if is_literal: - ret.append((''.join(x[0] for x in g), True)) - else: - ret.extend(g) - if len(ret) == 1: - return ret[0] - return (' '.join(to_rule(x) for x in seq), False) - - while i < length: - c = pattern[i] - if c == '.': - seq.append((get_dot(), False)) - i += 1 - elif c == '(': - i += 1 - if i < length: - assert pattern[i] != '?', f'Unsupported pattern syntax "{pattern[i]}" at index {i} of /{pattern}/' - seq.append((f'({to_rule(transform())})', False)) - elif c == ')': - i += 1 - assert start > 0 and pattern[start-1] == '(', f'Unbalanced parentheses; start = {start}, i = {i}, pattern = {pattern}' - return join_seq() - elif c == '[': - square_brackets = c - i += 1 - while i < length and pattern[i] != ']': - if pattern[i] == '\\': - square_brackets += pattern[i:i+2] - i += 2 - else: - square_brackets += pattern[i] - i += 1 - assert i < length, f'Unbalanced square brackets; start = {start}, i = {i}, pattern = {pattern}' - square_brackets += ']' - i += 1 - seq.append((square_brackets, False)) - elif c == '|': - seq.append(('|', False)) - i += 1 - elif c in ('*', '+', '?'): - seq[-1] = (to_rule(seq[-1]) + c, False) - i += 1 - elif c == '{': - curly_brackets = c - i += 1 - while i < length and pattern[i] != '}': - curly_brackets += pattern[i] - i += 1 - assert i < length, f'Unbalanced curly brackets; start = {start}, i = {i}, pattern = {pattern}' - curly_brackets += '}' - i += 1 - nums = [s.strip() for s in curly_brackets[1:-1].split(',')] - min_times = 0 - max_times = None - try: - if len(nums) == 1: - min_times = int(nums[0]) - max_times = min_times - else: - assert len(nums) == 2 - min_times = int(nums[0]) if nums[0] else 0 - max_times = int(nums[1]) if nums[1] else None - except ValueError: - raise ValueError(f'Invalid quantifier {curly_brackets} in /{pattern}/') - - (sub, sub_is_literal) = seq[-1] - - if not sub_is_literal: - id = sub_rule_ids.get(sub) - if id is None: - id = self._add_rule(f'{name}-{len(sub_rule_ids) + 1}', sub) - sub_rule_ids[sub] = id - sub = id - - seq[-1] = (_build_repetition(f'"{sub}"' if sub_is_literal else sub, min_times, max_times), False) - else: - literal = '' - while i < length: - if pattern[i] == '\\' and i < length - 1: - next = pattern[i + 1] - if next in ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS: - i += 1 - literal += pattern[i]
- i += 1 - else: - literal += pattern[i:i+2] - i += 2 - elif pattern[i] == '"' and not self._raw_pattern: - literal += '\\"' - i += 1 - elif pattern[i] not in NON_LITERAL_SET and \ - (i == length - 1 or literal == '' or pattern[i+1] == '.' or pattern[i+1] not in NON_LITERAL_SET): - literal += pattern[i] - i += 1 - else: - break - if literal: - seq.append((literal, True)) - - return join_seq() - - return self._add_rule( - name, - to_rule(transform()) if self._raw_pattern \ - else "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space") - - - def _resolve_ref(self, ref): - ref_name = ref.split('/')[-1] - if ref_name not in self._rules and ref not in self._refs_being_resolved: - self._refs_being_resolved.add(ref) - resolved = self._refs[ref] - ref_name = self.visit(resolved, ref_name) - self._refs_being_resolved.remove(ref) - return ref_name - - def _generate_constant_rule(self, value): - return self._format_literal(json.dumps(value)) - - def visit(self, schema, name): - schema_type = schema.get('type') - schema_format = schema.get('format') - rule_name = name + '-' if name in RESERVED_NAMES else name or 'root' - - if (ref := schema.get('$ref')) is not None: - return self._add_rule(rule_name, self._resolve_ref(ref)) - - elif 'oneOf' in schema or 'anyOf' in schema: - return self._add_rule(rule_name, self._generate_union_rule(name, schema.get('oneOf') or schema['anyOf'])) - - elif isinstance(schema_type, list): - return self._add_rule(rule_name, self._generate_union_rule(name, [{**schema, 'type': t} for t in schema_type])) - - elif 'const' in schema: - return self._add_rule(rule_name, self._generate_constant_rule(schema['const']) + ' space') - - elif 'enum' in schema: - rule = '(' + ' | '.join((self._generate_constant_rule(v) for v in schema['enum'])) + ') space' - return self._add_rule(rule_name, rule) - - elif schema_type in (None, 'object') and \ - ('properties' in schema or \ - ('additionalProperties' in schema and schema['additionalProperties'] is not True)): - required = set(schema.get('required', [])) - properties = list(schema.get('properties', {}).items()) - return self._add_rule(rule_name, self._build_object_rule(properties, required, name, schema.get('additionalProperties'))) - - elif schema_type in (None, 'object') and 'allOf' in schema: - required = set() - properties = [] - hybrid_name = name - def add_component(comp_schema, is_required): - if (ref := comp_schema.get('$ref')) is not None: - comp_schema = self._refs[ref] - - if 'properties' in comp_schema: - for prop_name, prop_schema in comp_schema['properties'].items(): - properties.append((prop_name, prop_schema)) - if is_required: - required.add(prop_name) - - for t in schema['allOf']: - if 'anyOf' in t: - for tt in t['anyOf']: - add_component(tt, is_required=False) - else: - add_component(t, is_required=True) - - return self._add_rule(rule_name, self._build_object_rule(properties, required, hybrid_name, additional_properties=None)) - - elif schema_type in (None, 'array') and ('items' in schema or 'prefixItems' in schema): - items = schema.get('items') or schema['prefixItems'] - if isinstance(items, list): - return self._add_rule( - rule_name, - '"[" space ' + - ' "," space '.join( - self.visit(item, f'{name}{"-" if name else ""}tuple-{i}') - for i, item in enumerate(items)) + - ' "]" space') - else: - item_rule_name = self.visit(items, f'{name}{"-" if name else ""}item') - min_items = schema.get("minItems", 0) - max_items = schema.get("maxItems") - return self._add_rule(rule_name, '"[" space ' + 
_build_repetition(item_rule_name, min_items, max_items, separator_rule='"," space') + ' "]" space') - - elif schema_type in (None, 'string') and 'pattern' in schema: - return self._visit_pattern(schema['pattern'], rule_name) - - elif schema_type in (None, 'string') and re.match(r'^uuid[1-5]?$', schema_format or ''): - return self._add_primitive( - 'root' if rule_name == 'root' else schema_format, - PRIMITIVE_RULES['uuid'] - ) - - elif schema_type in (None, 'string') and f'{schema_format}-string' in STRING_FORMAT_RULES: - prim_name = f'{schema_format}-string' - return self._add_rule(rule_name, self._add_primitive(prim_name, STRING_FORMAT_RULES[prim_name])) - - elif schema_type == 'string' and ('minLength' in schema or 'maxLength' in schema): - char_rule = self._add_primitive('char', PRIMITIVE_RULES['char']) - min_len = schema.get('minLength', 0) - max_len = schema.get('maxLength') - - return self._add_rule(rule_name, r'"\"" ' + _build_repetition(char_rule, min_len, max_len) + r' "\"" space') - - elif schema_type in (None, 'integer') and \ - ('minimum' in schema or 'exclusiveMinimum' in schema or 'maximum' in schema or 'exclusiveMaximum' in schema): - min_value = None - max_value = None - if 'minimum' in schema: - min_value = schema['minimum'] - elif 'exclusiveMinimum' in schema: - min_value = schema['exclusiveMinimum'] + 1 - if 'maximum' in schema: - max_value = schema['maximum'] - elif 'exclusiveMaximum' in schema: - max_value = schema['exclusiveMaximum'] - 1 - - out = ["("] - _generate_min_max_int(min_value, max_value, out) - out.append(") space") - return self._add_rule(rule_name, ''.join(out)) - - elif (schema_type == 'object') or (len(schema) == 0): - return self._add_rule(rule_name, self._add_primitive('object', PRIMITIVE_RULES['object'])) - - else: - assert schema_type in PRIMITIVE_RULES, f'Unrecognized schema: {schema}' - # TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero - return self._add_primitive('root' if rule_name == 'root' else schema_type, PRIMITIVE_RULES[schema_type]) - - def _add_primitive(self, name: str, rule: BuiltinRule): - n = self._add_rule(name, rule.content) - - for dep in rule.deps: - dep_rule = PRIMITIVE_RULES.get(dep) or STRING_FORMAT_RULES.get(dep) - assert dep_rule, f'Rule {dep} not known' - if dep not in self._rules: - self._add_primitive(dep, dep_rule) - return n - - def _build_object_rule(self, properties: List[Tuple[str, Any]], required: Set[str], name: str, additional_properties: Optional[Union[bool, Any]]): - prop_order = self._prop_order - # sort by position in prop_order (if specified) then by original order - sorted_props = [kv[0] for _, kv in sorted(enumerate(properties), key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]))] - - prop_kv_rule_names = {} - for prop_name, prop_schema in properties: - prop_rule_name = self.visit(prop_schema, f'{name}{"-" if name else ""}{prop_name}') - prop_kv_rule_names[prop_name] = self._add_rule( - f'{name}{"-" if name else ""}{prop_name}-kv', - fr'{self._format_literal(json.dumps(prop_name))} space ":" space {prop_rule_name}' - ) - required_props = [k for k in sorted_props if k in required] - optional_props = [k for k in sorted_props if k not in required] - - if additional_properties is not None and additional_properties != False: - sub_name = f'{name}{"-" if name else ""}additional' - value_rule = self.visit(additional_properties, f'{sub_name}-value') if isinstance(additional_properties, dict) else \ - self._add_primitive('value', PRIMITIVE_RULES['value']) - 
key_rule = self._add_primitive('string', PRIMITIVE_RULES['string']) if not sorted_props \ - else self._add_rule(f'{sub_name}-k', self._not_strings(sorted_props)) - - prop_kv_rule_names["*"] = self._add_rule( - f'{sub_name}-kv', - f'{key_rule} ":" space {value_rule}' - ) - optional_props.append("*") - - rule = '"{" space ' - rule += ' "," space '.join(prop_kv_rule_names[k] for k in required_props) - - if optional_props: - rule += ' (' - if required_props: - rule += ' "," space ( ' - - def get_recursive_refs(ks, first_is_optional): - [k, *rest] = ks - kv_rule_name = prop_kv_rule_names[k] - comma_ref = f'( "," space {kv_rule_name} )' - if first_is_optional: - res = comma_ref + ('*' if k == '*' else '?') - else: - res = kv_rule_name + (' ' + comma_ref + "*" if k == '*' else '') - if len(rest) > 0: - res += ' ' + self._add_rule( - f'{name}{"-" if name else ""}{k}-rest', - get_recursive_refs(rest, first_is_optional=True) - ) - return res - - rule += ' | '.join( - get_recursive_refs(optional_props[i:], first_is_optional=False) - for i in range(len(optional_props)) - ) - if required_props: - rule += ' )' - rule += ' )?' - - rule += ' "}" space' - - return rule - - def format_grammar(self): - return '\n'.join( - f'{name} ::= {rule}' - for name, rule in sorted(self._rules.items(), key=lambda kv: kv[0]) - ) - - -def main(args_in = None): - parser = argparse.ArgumentParser( - description=''' - Generates a grammar (suitable for use in ./llama-cli) that produces JSON conforming to a - given JSON schema. Only a subset of JSON schema features are supported; more may be - added in the future. - ''', - ) - parser.add_argument( - '--prop-order', - default=[], - type=lambda s: s.split(','), - help=''' - comma-separated property names defining the order of precedence for object properties; - properties not specified here are given lower precedence than those that are, and - are kept in their original order from the schema. Required properties are always - given precedence over optional properties. 
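Before the remaining flags, a hedged sketch of the end-to-end flow that `main()` wraps, using the converter defined above on an inline schema (the exact rule set printed depends on the primitive definitions):

```python
import json

# Minimal driver mirroring main(): no $refs to resolve, defaults for all flags.
converter = SchemaConverter(prop_order={}, allow_fetch=False, dotall=False, raw_pattern=False)
schema = json.loads('{"type": "object", "properties": {"name": {"type": "string"}}}')
schema = converter.resolve_refs(schema, 'file://example.json')  # no-op here: no $refs
converter.visit(schema, '')
print(converter.format_grammar())  # emits root, name-kv, space, string, ... rules
```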
- ''' - ) - parser.add_argument( - '--allow-fetch', - action='store_true', - default=False, - help='Whether to allow fetching referenced schemas over HTTPS') - parser.add_argument( - '--dotall', - action='store_true', - default=False, - help='Whether to treat dot (".") as matching all chars including line breaks in regular expression patterns') - parser.add_argument( - '--raw-pattern', - action='store_true', - default=False, - help='Treats string patterns as raw patterns w/o quotes (or quote escapes)') - - parser.add_argument('schema', help='file containing JSON schema ("-" for stdin)') - args = parser.parse_args(args_in) - - if args.schema.startswith('https://'): - url = args.schema - import requests - schema = requests.get(url).json() - elif args.schema == '-': - url = 'stdin' - schema = json.load(sys.stdin) - else: - url = f'file://{args.schema}' - with open(args.schema) as f: - schema = json.load(f) - converter = SchemaConverter( - prop_order={name: idx for idx, name in enumerate(args.prop_order)}, - allow_fetch=args.allow_fetch, - dotall=args.dotall, - raw_pattern=args.raw_pattern) - schema = converter.resolve_refs(schema, url) - converter.visit(schema, '') - print(converter.format_grammar()) - - -if __name__ == '__main__': - main() diff --git a/examples/llama.vim b/examples/llama.vim deleted file mode 100644 index 1b5ad6ba0..000000000 --- a/examples/llama.vim +++ /dev/null @@ -1,135 +0,0 @@ -" Requires an already running llama.cpp server -" To install either copy or symlink to ~/.vim/autoload/llama.vim -" Then start with either :call llama#doLlamaGen(), -" or add a keybind to your vimrc such as -" nnoremap Z :call llama#doLlamaGen() -" Similarly, you could add an insert mode keybind with -" inoremap call llama#doLlamaGen() -" -" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc -" let g:llama_api_url = "192.168.1.10:8080" -" llama_overrides can also be set through buffer/window scopes. For instance -" autocmd filetype python let b:llama_overrides = {"temp": 0.2} -" Could be added to your .vimrc to automatically set a lower temperature when -" editing a python script -" Additionally, an override dict can be stored at the top of a file -" !*{"stop": ["User:"]} -" Could be added to the start of your chatlog.txt to set the stopping token -" These parameter dicts are merged together from lowest to highest priority: -" server default -> g:llama_overrides -> w:llama_overrides -> -" b:llama_overrides -> in file (!*) overrides -" -" Sublists (like logit_bias and stop) are overridden, not merged -" Example override: -" !*{"logit_bias": [[13, -5], [2, false]], "temperature": 1, "top_k": 5, "top_p": 0.5, "n_predict": 256, "repeat_last_n": 256, "repeat_penalty": 1.17647} -if !exists("g:llama_api_url") - let g:llama_api_url= "127.0.0.1:8080" -endif -if !exists("g:llama_overrides") - let g:llama_overrides = {} -endif -const s:querydata = {"n_predict": 256, "stop": [ "\n" ], "stream": v:true } -const s:curlcommand = ['curl','--data-raw', "{\"prompt\":\"### System:\"}", '--silent', '--no-buffer', '--request', 'POST', '--url', g:llama_api_url .. 
'/completion', '--header', "Content-Type: application/json"] -let s:linedict = {} - -func s:callbackHandler(bufn, channel, msg) - if len(a:msg) < 3 - return - elseif a:msg[0] == "d" - let l:msg = a:msg[6:-1] - else - let l:msg = a:msg - endif - let l:decoded_msg = json_decode(l:msg) - let l:newtext = split(l:decoded_msg['content'], "\n", 1) - if len(l:newtext) > 0 - call setbufline(a:bufn, s:linedict[a:bufn], getbufline(a:bufn, s:linedict[a:bufn])[0] .. newtext[0]) - else - echo "nothing genned" - endif - if len(newtext) > 1 - let l:failed = appendbufline(a:bufn, s:linedict[a:bufn], newtext[1:-1]) - let s:linedict[a:bufn] = s:linedict[a:bufn] + len(newtext)-1 - endif - if has_key(l:decoded_msg, "stop") && l:decoded_msg.stop - echo "Finished generation" - endif -endfunction - -func llama#doLlamaGen() - if exists("b:job") - if job_status(b:job) == "run" - call job_stop(b:job) - return - endif - endif - - let l:cbuffer = bufnr("%") - let s:linedict[l:cbuffer] = line('$') - let l:buflines = getbufline(l:cbuffer, 1, 1000) - let l:querydata = copy(s:querydata) - call extend(l:querydata, g:llama_overrides) - if exists("w:llama_overrides") - call extend(l:querydata, w:llama_overrides) - endif - if exists("b:llama_overrides") - call extend(l:querydata, b:llama_overrides) - endif - if l:buflines[0][0:1] == '!*' - let l:userdata = json_decode(l:buflines[0][2:-1]) - call extend(l:querydata, l:userdata) - let l:buflines = l:buflines[1:-1] - endif - let l:querydata.prompt = join(l:buflines, "\n") - let l:curlcommand = copy(s:curlcommand) - if exists("g:llama_api_key") - call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key]) - endif - let l:curlcommand[2] = json_encode(l:querydata) - let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])}) -endfunction - -" Echoes the tokenization of the provided string, or cursor to end of word -" Onus is placed on the user to include the preceding space -func llama#tokenizeWord(...) - if (a:0 > 0) - let l:input = a:1 - else - exe "normal \"*ye" - let l:input = @* - endif - let l:querydata = {"content": l:input} - let l:curlcommand = copy(s:curlcommand) - let l:curlcommand[2] = json_encode(l:querydata) - let l:curlcommand[8] = g:llama_api_url .. "/tokenize" - let s:token_job = job_start(l:curlcommand, {"callback": function("s:tokenizeWordCallback", [l:input])}) -endfunction - -func s:tokenizeWordCallback(plaintext, channel, msg) - echo '"' .. a:plaintext ..'" - ' .. string(json_decode(a:msg).tokens) -endfunction - - -" Echoes the token count of the entire buffer (or provided string) -" Example usage :echo llama#tokenCount() -func llama#tokenCount(...) - if (a:0 > 0) - let l:buflines = a:1 - else - let l:buflines = getline(1,1000) - if l:buflines[0][0:1] == '!*' - let l:buflines = l:buflines[1:-1] - endif - let l:buflines = join(l:buflines, "\n") - endif - let l:querydata = {"content": l:buflines} - let l:curlcommand = copy(s:curlcommand) - let l:curlcommand[2] = json_encode(l:querydata) - let l:curlcommand[8] = g:llama_api_url .. "/tokenize" - let s:token_job = job_start(l:curlcommand, {"callback": "s:tokenCountCallback"}) -endfunction - -func s:tokenCountCallback(channel, msg) - let resp = json_decode(a:msg) - echo len(resp.tokens) -endfunction diff --git a/examples/llm.vim b/examples/llm.vim deleted file mode 100644 index d580a3d00..000000000 --- a/examples/llm.vim +++ /dev/null @@ -1,28 +0,0 @@ -" Basic plugin example - -function! 
Llm() - - let url = "http://127.0.0.1:8080/completion" - - " Get the content of the current buffer - let buffer_content = join(getline(1, '$'), "\n") - - " Create the JSON payload - let json_payload = {"temp":0.72,"top_k":100,"top_p":0.73,"repeat_penalty":1.100000023841858,"n_predict":256,"stop": ["\n\n\n"],"stream": v:false} - let json_payload.prompt = buffer_content - - " Define the curl command - let curl_command = 'curl -k -s -X POST -H "Content-Type: application/json" -d @- ' . url - let response = system(curl_command, json_encode(json_payload)) - - " Extract the content field from the response - let content = json_decode(response).content - - let split_newlines = split(content, '\n', 1) - - " Insert the content at the cursor position - call setline(line('.'), [ getline('.') . split_newlines[0] ] + split_newlines[1:]) -endfunction - -command! Llm call Llm() -noremap :Llm diff --git a/examples/pydantic_models_to_grammar.py b/examples/pydantic_models_to_grammar.py deleted file mode 100644 index 93e5dcb6c..000000000 --- a/examples/pydantic_models_to_grammar.py +++ /dev/null @@ -1,1322 +0,0 @@ -from __future__ import annotations - -import inspect -import json -import re -from copy import copy -from enum import Enum -from inspect import getdoc, isclass -from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union, get_args, get_origin, get_type_hints - -from docstring_parser import parse -from pydantic import BaseModel, create_model - -if TYPE_CHECKING: - from types import GenericAlias -else: - # python 3.8 compat - from typing import _GenericAlias as GenericAlias - -# TODO: fix this -# pyright: reportAttributeAccessIssue=information - - -class PydanticDataType(Enum): - """ - Defines the data types supported by the grammar_generator. - - Attributes: - STRING (str): Represents a string data type. - BOOLEAN (str): Represents a boolean data type. - INTEGER (str): Represents an integer data type. - FLOAT (str): Represents a float data type. - OBJECT (str): Represents an object data type. - ARRAY (str): Represents an array data type. - ENUM (str): Represents an enum data type. - CUSTOM_CLASS (str): Represents a custom class data type. 
- """ - - STRING = "string" - TRIPLE_QUOTED_STRING = "triple_quoted_string" - MARKDOWN_CODE_BLOCK = "markdown_code_block" - BOOLEAN = "boolean" - INTEGER = "integer" - FLOAT = "float" - OBJECT = "object" - ARRAY = "array" - ENUM = "enum" - ANY = "any" - NULL = "null" - CUSTOM_CLASS = "custom-class" - CUSTOM_DICT = "custom-dict" - SET = "set" - - -def map_pydantic_type_to_gbnf(pydantic_type: type[Any]) -> str: - origin_type = get_origin(pydantic_type) - origin_type = pydantic_type if origin_type is None else origin_type - - if isclass(origin_type) and issubclass(origin_type, str): - return PydanticDataType.STRING.value - elif isclass(origin_type) and issubclass(origin_type, bool): - return PydanticDataType.BOOLEAN.value - elif isclass(origin_type) and issubclass(origin_type, int): - return PydanticDataType.INTEGER.value - elif isclass(origin_type) and issubclass(origin_type, float): - return PydanticDataType.FLOAT.value - elif isclass(origin_type) and issubclass(origin_type, Enum): - return PydanticDataType.ENUM.value - - elif isclass(origin_type) and issubclass(origin_type, BaseModel): - return format_model_and_field_name(origin_type.__name__) - elif origin_type is list: - element_type = get_args(pydantic_type)[0] - return f"{map_pydantic_type_to_gbnf(element_type)}-list" - elif origin_type is set: - element_type = get_args(pydantic_type)[0] - return f"{map_pydantic_type_to_gbnf(element_type)}-set" - elif origin_type is Union: - union_types = get_args(pydantic_type) - union_rules = [map_pydantic_type_to_gbnf(ut) for ut in union_types] - return f"union-{'-or-'.join(union_rules)}" - elif origin_type is Optional: - element_type = get_args(pydantic_type)[0] - return f"optional-{map_pydantic_type_to_gbnf(element_type)}" - elif isclass(origin_type): - return f"{PydanticDataType.CUSTOM_CLASS.value}-{format_model_and_field_name(origin_type.__name__)}" - elif origin_type is dict: - key_type, value_type = get_args(pydantic_type) - return f"custom-dict-key-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(key_type))}-value-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(value_type))}" - else: - return "unknown" - - -def format_model_and_field_name(model_name: str) -> str: - parts = re.findall("[A-Z][^A-Z]*", model_name) - if not parts: # Check if the list is empty - return model_name.lower().replace("_", "-") - return "-".join(part.lower().replace("_", "-") for part in parts) - - -def generate_list_rule(element_type): - """ - Generate a GBNF rule for a list of a given element type. - - :param element_type: The type of the elements in the list (e.g., 'string'). - :return: A string representing the GBNF rule for a list of the given type. 
- """ - rule_name = f"{map_pydantic_type_to_gbnf(element_type)}-list" - element_rule = map_pydantic_type_to_gbnf(element_type) - list_rule = rf'{rule_name} ::= "[" {element_rule} ("," {element_rule})* "]"' - return list_rule - - -def get_members_structure(cls, rule_name): - if issubclass(cls, Enum): - # Handle Enum types - members = [f'"\\"{member.value}\\""' for name, member in cls.__members__.items()] - return f"{cls.__name__.lower()} ::= " + " | ".join(members) - if cls.__annotations__ and cls.__annotations__ != {}: - result = f'{rule_name} ::= "{{"' - # Modify this comprehension - members = [ - f' "\\"{name}\\"" ":" {map_pydantic_type_to_gbnf(param_type)}' - for name, param_type in get_type_hints(cls).items() - if name != "self" - ] - - result += '"," '.join(members) - result += ' "}"' - return result - if rule_name == "custom-class-any": - result = f"{rule_name} ::= " - result += "value" - return result - - init_signature = inspect.signature(cls.__init__) - parameters = init_signature.parameters - result = f'{rule_name} ::= "{{"' - # Modify this comprehension too - members = [ - f' "\\"{name}\\"" ":" {map_pydantic_type_to_gbnf(param.annotation)}' - for name, param in parameters.items() - if name != "self" and param.annotation != inspect.Parameter.empty - ] - - result += '", "'.join(members) - result += ' "}"' - return result - - -def regex_to_gbnf(regex_pattern: str) -> str: - """ - Translate a basic regex pattern to a GBNF rule. - Note: This function handles only a subset of simple regex patterns. - """ - gbnf_rule = regex_pattern - - # Translate common regex components to GBNF - gbnf_rule = gbnf_rule.replace("\\d", "[0-9]") - gbnf_rule = gbnf_rule.replace("\\s", "[ \t\n]") - - # Handle quantifiers and other regex syntax that is similar in GBNF - # (e.g., '*', '+', '?', character classes) - - return gbnf_rule - - -def generate_gbnf_integer_rules(max_digit=None, min_digit=None): - """ - - Generate GBNF Integer Rules - - Generates GBNF (Generalized Backus-Naur Form) rules for integers based on the given maximum and minimum digits. - - Parameters: - max_digit (int): The maximum number of digits for the integer. Default is None. - min_digit (int): The minimum number of digits for the integer. Default is None. - - Returns: - integer_rule (str): The identifier for the integer rule generated. - additional_rules (list): A list of additional rules generated based on the given maximum and minimum digits. - - """ - additional_rules = [] - - # Define the rule identifier based on max_digit and min_digit - integer_rule = "integer-part" - if max_digit is not None: - integer_rule += f"-max{max_digit}" - if min_digit is not None: - integer_rule += f"-min{min_digit}" - - # Handling Integer Rules - if max_digit is not None or min_digit is not None: - # Start with an empty rule part - integer_rule_part = "" - - # Add mandatory digits as per min_digit - if min_digit is not None: - integer_rule_part += "[0-9] " * min_digit - - # Add optional digits up to max_digit - if max_digit is not None: - optional_digits = max_digit - (min_digit if min_digit is not None else 0) - integer_rule_part += "".join(["[0-9]? 
" for _ in range(optional_digits)]) - - # Trim the rule part and append it to additional rules - integer_rule_part = integer_rule_part.strip() - if integer_rule_part: - additional_rules.append(f"{integer_rule} ::= {integer_rule_part}") - - return integer_rule, additional_rules - - -def generate_gbnf_float_rules(max_digit=None, min_digit=None, max_precision=None, min_precision=None): - """ - Generate GBNF float rules based on the given constraints. - - :param max_digit: Maximum number of digits in the integer part (default: None) - :param min_digit: Minimum number of digits in the integer part (default: None) - :param max_precision: Maximum number of digits in the fractional part (default: None) - :param min_precision: Minimum number of digits in the fractional part (default: None) - :return: A tuple containing the float rule and additional rules as a list - - Example Usage: - max_digit = 3 - min_digit = 1 - max_precision = 2 - min_precision = 1 - generate_gbnf_float_rules(max_digit, min_digit, max_precision, min_precision) - - Output: - ('float-3-1-2-1', ['integer-part-max3-min1 ::= [0-9] [0-9] [0-9]?', 'fractional-part-max2-min1 ::= [0-9] [0-9]?', 'float-3-1-2-1 ::= integer-part-max3-min1 "." fractional-part-max2-min - *1']) - - Note: - GBNF stands for Generalized Backus-Naur Form, which is a notation technique to specify the syntax of programming languages or other formal grammars. - """ - additional_rules = [] - - # Define the integer part rule - integer_part_rule = ( - "integer-part" - + (f"-max{max_digit}" if max_digit is not None else "") - + (f"-min{min_digit}" if min_digit is not None else "") - ) - - # Define the fractional part rule based on precision constraints - fractional_part_rule = "fractional-part" - fractional_rule_part = "" - if max_precision is not None or min_precision is not None: - fractional_part_rule += (f"-max{max_precision}" if max_precision is not None else "") + ( - f"-min{min_precision}" if min_precision is not None else "" - ) - # Minimum number of digits - fractional_rule_part = "[0-9]" * (min_precision if min_precision is not None else 1) - # Optional additional digits - fractional_rule_part += "".join( - [" [0-9]?"] * ((max_precision - ( - min_precision if min_precision is not None else 1)) if max_precision is not None else 0) - ) - additional_rules.append(f"{fractional_part_rule} ::= {fractional_rule_part}") - - # Define the float rule - float_rule = f"float-{max_digit if max_digit is not None else 'X'}-{min_digit if min_digit is not None else 'X'}-{max_precision if max_precision is not None else 'X'}-{min_precision if min_precision is not None else 'X'}" - additional_rules.append(f'{float_rule} ::= {integer_part_rule} "." {fractional_part_rule}') - - # Generating the integer part rule definition, if necessary - if max_digit is not None or min_digit is not None: - integer_rule_part = "[0-9]" - if min_digit is not None and min_digit > 1: - integer_rule_part += " [0-9]" * (min_digit - 1) - if max_digit is not None: - integer_rule_part += "".join([" [0-9]?"] * (max_digit - (min_digit if min_digit is not None else 1))) - additional_rules.append(f"{integer_part_rule} ::= {integer_rule_part.strip()}") - - return float_rule, additional_rules - - -def generate_gbnf_rule_for_type( - model_name, field_name, field_type, is_optional, processed_models, created_rules, field_info=None -) -> tuple[str, list[str]]: - """ - Generate GBNF rule for a given field type. - - :param model_name: Name of the model. - - :param field_name: Name of the field. 
- :param field_type: Type of the field. - :param is_optional: Whether the field is optional. - :param processed_models: List of processed models. - :param created_rules: List of created rules. - :param field_info: Additional information about the field (optional). - - :return: Tuple containing the GBNF type and a list of additional rules. - :rtype: tuple[str, list] - """ - rules = [] - - field_name = format_model_and_field_name(field_name) - gbnf_type = map_pydantic_type_to_gbnf(field_type) - - origin_type = get_origin(field_type) - origin_type = field_type if origin_type is None else origin_type - - if isclass(origin_type) and issubclass(origin_type, BaseModel): - nested_model_name = format_model_and_field_name(field_type.__name__) - nested_model_rules, _ = generate_gbnf_grammar(field_type, processed_models, created_rules) - rules.extend(nested_model_rules) - gbnf_type, rules = nested_model_name, rules - elif isclass(origin_type) and issubclass(origin_type, Enum): - enum_values = [f'"\\"{e.value}\\""' for e in field_type] # Adding escaped quotes - enum_rule = f"{model_name}-{field_name} ::= {' | '.join(enum_values)}" - rules.append(enum_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - elif origin_type is list: # Array - element_type = get_args(field_type)[0] - element_rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules - ) - rules.extend(additional_rules) - array_rule = f"""{model_name}-{field_name} ::= "[" ws {element_rule_name} ("," ws {element_rule_name})* "]" """ - rules.append(array_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - - elif origin_type is set: # Array - element_type = get_args(field_type)[0] - element_rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules - ) - rules.extend(additional_rules) - array_rule = f"""{model_name}-{field_name} ::= "[" ws {element_rule_name} ("," ws {element_rule_name})* "]" """ - rules.append(array_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - - elif gbnf_type.startswith("custom-class-"): - rules.append(get_members_structure(field_type, gbnf_type)) - elif gbnf_type.startswith("custom-dict-"): - key_type, value_type = get_args(field_type) - - additional_key_type, additional_key_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-key-type", key_type, is_optional, processed_models, created_rules - ) - additional_value_type, additional_value_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-value-type", value_type, is_optional, processed_models, created_rules - ) - gbnf_type = rf'{gbnf_type} ::= "{{" ( {additional_key_type} ": " {additional_value_type} ("," "\n" ws {additional_key_type} ":" {additional_value_type})* )? 
"}}" ' - - rules.extend(additional_key_rules) - rules.extend(additional_value_rules) - elif gbnf_type.startswith("union-"): - union_types = get_args(field_type) - union_rules = [] - - for union_type in union_types: - if isinstance(union_type, GenericAlias): - union_gbnf_type, union_rules_list = generate_gbnf_rule_for_type( - model_name, field_name, union_type, False, processed_models, created_rules - ) - union_rules.append(union_gbnf_type) - rules.extend(union_rules_list) - - elif not issubclass(union_type, type(None)): - union_gbnf_type, union_rules_list = generate_gbnf_rule_for_type( - model_name, field_name, union_type, False, processed_models, created_rules - ) - union_rules.append(union_gbnf_type) - rules.extend(union_rules_list) - - # Defining the union grammar rule separately - if len(union_rules) == 1: - union_grammar_rule = f"{model_name}-{field_name}-optional ::= {' | '.join(union_rules)} | null" - else: - union_grammar_rule = f"{model_name}-{field_name}-union ::= {' | '.join(union_rules)}" - rules.append(union_grammar_rule) - if len(union_rules) == 1: - gbnf_type = f"{model_name}-{field_name}-optional" - else: - gbnf_type = f"{model_name}-{field_name}-union" - elif isclass(origin_type) and issubclass(origin_type, str): - if field_info and hasattr(field_info, "json_schema_extra") and field_info.json_schema_extra is not None: - triple_quoted_string = field_info.json_schema_extra.get("triple_quoted_string", False) - markdown_string = field_info.json_schema_extra.get("markdown_code_block", False) - - gbnf_type = PydanticDataType.TRIPLE_QUOTED_STRING.value if triple_quoted_string else PydanticDataType.STRING.value - gbnf_type = PydanticDataType.MARKDOWN_CODE_BLOCK.value if markdown_string else gbnf_type - - elif field_info and hasattr(field_info, "pattern"): - # Convert regex pattern to grammar rule - regex_pattern = field_info.regex.pattern - gbnf_type = f"pattern-{field_name} ::= {regex_to_gbnf(regex_pattern)}" - else: - gbnf_type = PydanticDataType.STRING.value - - elif ( - isclass(origin_type) - and issubclass(origin_type, float) - and field_info - and hasattr(field_info, "json_schema_extra") - and field_info.json_schema_extra is not None - ): - # Retrieve precision attributes for floats - max_precision = ( - field_info.json_schema_extra.get("max_precision") if field_info and hasattr(field_info, - "json_schema_extra") else None - ) - min_precision = ( - field_info.json_schema_extra.get("min_precision") if field_info and hasattr(field_info, - "json_schema_extra") else None - ) - max_digits = field_info.json_schema_extra.get("max_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - min_digits = field_info.json_schema_extra.get("min_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - - # Generate GBNF rule for float with given attributes - gbnf_type, rules = generate_gbnf_float_rules( - max_digit=max_digits, min_digit=min_digits, max_precision=max_precision, min_precision=min_precision - ) - - elif ( - isclass(origin_type) - and issubclass(origin_type, int) - and field_info - and hasattr(field_info, "json_schema_extra") - and field_info.json_schema_extra is not None - ): - # Retrieve digit attributes for integers - max_digits = field_info.json_schema_extra.get("max_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - min_digits = field_info.json_schema_extra.get("min_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - - # Generate GBNF rule for integer with given 
attributes - gbnf_type, rules = generate_gbnf_integer_rules(max_digit=max_digits, min_digit=min_digits) - else: - gbnf_type, rules = gbnf_type, [] - - return gbnf_type, rules - - -def generate_gbnf_grammar(model: type[BaseModel], processed_models: set[type[BaseModel]], created_rules: dict[str, list[str]]) -> tuple[list[str], bool]: - """ - - Generate GBNF Grammar - - Generates a GBNF grammar for a given model. - - :param model: A Pydantic model class to generate the grammar for. Must be a subclass of BaseModel. - :param processed_models: A set of already processed models to prevent infinite recursion. - :param created_rules: A dict containing already created rules to prevent duplicates. - :return: A list of GBNF grammar rules in string format, and a boolean indicating whether an extra markdown code block or triple-quoted string is in the grammar. - Example Usage: - ``` - model = MyModel - processed_models = set() - created_rules = dict() - - gbnf_grammar, has_special_string = generate_gbnf_grammar(model, processed_models, created_rules) - ``` - """ - if model in processed_models: - return [], False - - processed_models.add(model) - model_name = format_model_and_field_name(model.__name__) - - if not issubclass(model, BaseModel): - # For non-Pydantic classes, generate model_fields from __annotations__ or __init__ - if hasattr(model, "__annotations__") and model.__annotations__: - model_fields = {name: (typ, ...) for name, typ in get_type_hints(model).items()} - else: - init_signature = inspect.signature(model.__init__) - parameters = init_signature.parameters - model_fields = {name: (param.annotation, param.default) for name, param in parameters.items() if - name != "self"} - else: - # For Pydantic models, use model_fields and check for ellipsis (required fields) - model_fields = get_type_hints(model) - - model_rule_parts = [] - nested_rules = [] - has_markdown_code_block = False - has_triple_quoted_string = False - look_for_markdown_code_block = False - look_for_triple_quoted_string = False - for field_name, field_info in model_fields.items(): - if not issubclass(model, BaseModel): - field_type, default_value = field_info - # Check if the field is optional (not required) - is_optional = (default_value is not inspect.Parameter.empty) and (default_value is not Ellipsis) - else: - field_type = field_info - field_info = model.model_fields[field_name] - is_optional = field_info.is_required is False and get_origin(field_type) is Optional - rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, format_model_and_field_name(field_name), field_type, is_optional, processed_models, - created_rules, field_info - ) - look_for_markdown_code_block = True if rule_name == "markdown_code_block" else False - look_for_triple_quoted_string = True if rule_name == "triple_quoted_string" else False - if not look_for_markdown_code_block and not look_for_triple_quoted_string: - if rule_name not in created_rules: - created_rules[rule_name] = additional_rules - model_rule_parts.append(f' ws "\\"{field_name}\\"" ":" ws {rule_name}') # Adding escaped quotes - nested_rules.extend(additional_rules) - else: - has_triple_quoted_string = look_for_triple_quoted_string - has_markdown_code_block = look_for_markdown_code_block - - fields_joined = r' "," "\n" '.join(model_rule_parts) - model_rule = rf'{model_name} ::= "{{" "\n" {fields_joined} "\n" ws "}}"' - - has_special_string = False - if has_triple_quoted_string: - model_rule += '"\\n" ws "}"' - model_rule += '"\\n" triple-quoted-string' - has_special_string = True - if 
has_markdown_code_block: - model_rule += '"\\n" ws "}"' - model_rule += '"\\n" markdown-code-block' - has_special_string = True - all_rules = [model_rule] + nested_rules - - return all_rules, has_special_string - - -def generate_gbnf_grammar_from_pydantic_models( - models: list[type[BaseModel]], outer_object_name: str | None = None, outer_object_content: str | None = None, - list_of_outputs: bool = False -) -> str: - """ - Generate GBNF Grammar from Pydantic Models. - - This method takes a list of Pydantic models and uses them to generate a GBNF grammar string. The generated grammar string can be used for parsing and validating data using the generated grammar. - - Args: - models (list[type[BaseModel]]): A list of Pydantic models to generate the grammar from. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - list_of_outputs (bool, optional): Allows a list of output objects - Returns: - str: The generated GBNF grammar string. - - Examples: - models = [UserModel, PostModel] - grammar = generate_gbnf_grammar_from_pydantic_models(models) - print(grammar) - # Output: - # root ::= UserModel | PostModel - # ... - """ - processed_models: set[type[BaseModel]] = set() - all_rules = [] - created_rules: dict[str, list[str]] = {} - if outer_object_name is None: - for model in models: - model_rules, _ = generate_gbnf_grammar(model, processed_models, created_rules) - all_rules.extend(model_rules) - - if list_of_outputs: - root_rule = r'root ::= (" "| "\n") "[" ws grammar-models ("," ws grammar-models)* ws "]"' + "\n" - else: - root_rule = r'root ::= (" "| "\n") grammar-models' + "\n" - root_rule += "grammar-models ::= " + " | ".join( - [format_model_and_field_name(model.__name__) for model in models]) - all_rules.insert(0, root_rule) - return "\n".join(all_rules) - elif outer_object_name is not None: - if list_of_outputs: - root_rule = ( - rf'root ::= (" "| "\n") "[" ws {format_model_and_field_name(outer_object_name)} ("," ws {format_model_and_field_name(outer_object_name)})* ws "]"' - + "\n" - ) - else: - root_rule = f"root ::= {format_model_and_field_name(outer_object_name)}\n" - - model_rule = ( - rf'{format_model_and_field_name(outer_object_name)} ::= (" "| "\n") "{{" ws "\"{outer_object_name}\"" ":" ws grammar-models' - ) - - fields_joined = " | ".join( - [rf"{format_model_and_field_name(model.__name__)}-grammar-model" for model in models]) - - grammar_model_rules = f"\ngrammar-models ::= {fields_joined}" - mod_rules = [] - for model in models: - mod_rule = rf"{format_model_and_field_name(model.__name__)}-grammar-model ::= " - mod_rule += ( - rf'"\"{model.__name__}\"" "," ws "\"{outer_object_content}\"" ":" ws {format_model_and_field_name(model.__name__)}' + "\n" - ) - mod_rules.append(mod_rule) - grammar_model_rules += "\n" + "\n".join(mod_rules) - - for model in models: - model_rules, has_special_string = generate_gbnf_grammar(model, processed_models, - created_rules) - - if not has_special_string: - model_rules[0] += r'"\n" ws "}"' - - all_rules.extend(model_rules) - - all_rules.insert(0, root_rule + model_rule + grammar_model_rules) - return "\n".join(all_rules) - - -def get_primitive_grammar(grammar): - """ - Returns the needed GBNF primitive grammar for a given GBNF grammar string. - - Args: - grammar (str): The string containing the GBNF grammar.
- - Returns: - str: GBNF primitive grammar string. - """ - type_list: list[type[object]] = [] - if "string-list" in grammar: - type_list.append(str) - if "boolean-list" in grammar: - type_list.append(bool) - if "integer-list" in grammar: - type_list.append(int) - if "float-list" in grammar: - type_list.append(float) - additional_grammar = [generate_list_rule(t) for t in type_list] - primitive_grammar = r""" -boolean ::= "true" | "false" -null ::= "null" -string ::= "\"" ( - [^"\\] | - "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) - )* "\"" ws -ws ::= ([ \t\n] ws)? -float ::= ("-"? ([0] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws - -integer ::= [0-9]+""" - - any_block = "" - if "custom-class-any" in grammar: - any_block = """ -value ::= object | array | string | number | boolean | null - -object ::= - "{" ws ( - string ":" ws value - ("," ws string ":" ws value)* - )? "}" ws - -array ::= - "[" ws ( - value - ("," ws value)* - )? "]" ws - -number ::= integer | float""" - - markdown_code_block_grammar = "" - if "markdown-code-block" in grammar: - markdown_code_block_grammar = r''' -markdown-code-block ::= opening-triple-ticks markdown-code-block-content closing-triple-ticks -markdown-code-block-content ::= ( [^`] | "`" [^`] | "`" "`" [^`] )* -opening-triple-ticks ::= "```" "python" "\n" | "```" "c" "\n" | "```" "cpp" "\n" | "```" "txt" "\n" | "```" "text" "\n" | "```" "json" "\n" | "```" "javascript" "\n" | "```" "css" "\n" | "```" "html" "\n" | "```" "markdown" "\n" -closing-triple-ticks ::= "```" "\n"''' - - if "triple-quoted-string" in grammar: - markdown_code_block_grammar = r""" -triple-quoted-string ::= triple-quotes triple-quoted-string-content triple-quotes -triple-quoted-string-content ::= ( [^'] | "'" [^'] | "'" "'" [^'] )* -triple-quotes ::= "'''" """ - return "\n" + "\n".join(additional_grammar) + any_block + primitive_grammar + markdown_code_block_grammar - - -def generate_markdown_documentation( - pydantic_models: list[type[BaseModel]], model_prefix="Model", fields_prefix="Fields", - documentation_with_field_description=True -) -> str: - """ - Generate markdown documentation for a list of Pydantic models. - - Args: - pydantic_models (list[type[BaseModel]]): list of Pydantic model classes. - model_prefix (str): Prefix for the model section. - fields_prefix (str): Prefix for the fields section. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation. 
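As a hedged illustration of `get_primitive_grammar` above: a grammar that mentions `string-list` pulls in the generated list rule plus the JSON primitives (output abbreviated in the comments):

```python
print(get_primitive_grammar('root ::= string-list'))
# string-list ::= "[" string ("," string)* "]"
# boolean ::= "true" | "false"
# null ::= "null"
# ... string / ws / float / integer primitive rules follow
```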
- """ - documentation = "" - pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models] - for model, add_prefix in pyd_models: - if add_prefix: - documentation += f"{model_prefix}: {model.__name__}\n" - else: - documentation += f"Model: {model.__name__}\n" - - # Handling multi-line model description with proper indentation - - class_doc = getdoc(model) - base_class_doc = getdoc(BaseModel) - class_description = class_doc if class_doc and class_doc != base_class_doc else "" - if class_description != "": - documentation += " Description: " - documentation += format_multiline_description(class_description, 0) + "\n" - - if add_prefix: - # Indenting the fields section - documentation += f" {fields_prefix}:\n" - else: - documentation += f" Fields:\n" # noqa: F541 - if isclass(model) and issubclass(model, BaseModel): - for name, field_type in get_type_hints(model).items(): - # if name == "markdown_code_block": - # continue - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: - element_types = get_args(field_type) - for element_type in element_types: - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - documentation += generate_field_markdown( - name, field_type, model, documentation_with_field_description=documentation_with_field_description - ) - documentation += "\n" - - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - documentation += f" Expected Example Output for {format_model_and_field_name(model.__name__)}:\n" - json_example = json.dumps(model.Config.json_schema_extra["example"]) - documentation += format_multiline_description(json_example, 2) + "\n" - - return documentation - - -def generate_field_markdown( - field_name: str, field_type: type[Any], model: type[BaseModel], depth=1, - documentation_with_field_description=True -) -> str: - """ - Generate markdown documentation for a Pydantic model field. - - Args: - field_name (str): Name of the field. - field_type (type[Any]): Type of the field. - model (type[BaseModel]): Pydantic model class. - depth (int): Indentation depth in the documentation. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation for the field. 
- """ - indent = " " * depth - - field_info = model.model_fields.get(field_name) - field_description = field_info.description if field_info and field_info.description else "" - - origin_type = get_origin(field_type) - origin_type = field_type if origin_type is None else origin_type - - if origin_type == list: - element_type = get_args(field_type)[0] - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)} of {format_model_and_field_name(element_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - elif origin_type == Union: - element_types = get_args(field_type) - types = [] - for element_type in element_types: - types.append(format_model_and_field_name(element_type.__name__)) - field_text = f"{indent}{field_name} ({' or '.join(types)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - else: - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - - if not documentation_with_field_description: - return field_text - - if field_description != "": - field_text += f" Description: {field_description}\n" - - # Check for and include field-specific examples if available - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - field_example = model.Config.json_schema_extra["example"].get(field_name) - if field_example is not None: - example_text = f"'{field_example}'" if isinstance(field_example, str) else field_example - field_text += f"{indent} Example: {example_text}\n" - - if isclass(origin_type) and issubclass(origin_type, BaseModel): - field_text += f"{indent} Details:\n" - for name, type_ in get_type_hints(field_type).items(): - field_text += generate_field_markdown(name, type_, field_type, depth + 2) - - return field_text - - -def format_json_example(example: dict[str, Any], depth: int) -> str: - """ - Format a JSON example into a readable string with indentation. - - Args: - example (dict): JSON example to be formatted. - depth (int): Indentation depth. - - Returns: - str: Formatted JSON example string. - """ - indent = " " * depth - formatted_example = "{\n" - for key, value in example.items(): - value_text = f"'{value}'" if isinstance(value, str) else value - formatted_example += f"{indent}{key}: {value_text},\n" - formatted_example = formatted_example.rstrip(",\n") + "\n" + indent + "}" - return formatted_example - - -def generate_text_documentation( - pydantic_models: list[type[BaseModel]], model_prefix="Model", fields_prefix="Fields", - documentation_with_field_description=True -) -> str: - """ - Generate text documentation for a list of Pydantic models. - - Args: - pydantic_models (list[type[BaseModel]]): List of Pydantic model classes. - model_prefix (str): Prefix for the model section. - fields_prefix (str): Prefix for the fields section. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation. 
- """ - documentation = "" - pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models] - for model, add_prefix in pyd_models: - if add_prefix: - documentation += f"{model_prefix}: {model.__name__}\n" - else: - documentation += f"Model: {model.__name__}\n" - - # Handling multi-line model description with proper indentation - - class_doc = getdoc(model) - base_class_doc = getdoc(BaseModel) - class_description = class_doc if class_doc and class_doc != base_class_doc else "" - if class_description != "": - documentation += " Description: " - documentation += "\n" + format_multiline_description(class_description, 2) + "\n" - - if isclass(model) and issubclass(model, BaseModel): - documentation_fields = "" - for name, field_type in get_type_hints(model).items(): - # if name == "markdown_code_block": - # continue - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: - element_types = get_args(field_type) - for element_type in element_types: - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - documentation_fields += generate_field_text( - name, field_type, model, documentation_with_field_description=documentation_with_field_description - ) - if documentation_fields != "": - if add_prefix: - documentation += f" {fields_prefix}:\n{documentation_fields}" - else: - documentation += f" Fields:\n{documentation_fields}" - documentation += "\n" - - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - documentation += f" Expected Example Output for {format_model_and_field_name(model.__name__)}:\n" - json_example = json.dumps(model.Config.json_schema_extra["example"]) - documentation += format_multiline_description(json_example, 2) + "\n" - - return documentation - - -def generate_field_text( - field_name: str, field_type: type[Any], model: type[BaseModel], depth=1, - documentation_with_field_description=True -) -> str: - """ - Generate text documentation for a Pydantic model field. - - Args: - field_name (str): Name of the field. - field_type (type[Any]): Type of the field. - model (type[BaseModel]): Pydantic model class. - depth (int): Indentation depth in the documentation. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation for the field. 
- """ - indent = " " * depth - - field_info = model.model_fields.get(field_name) - field_description = field_info.description if field_info and field_info.description else "" - - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)} of {format_model_and_field_name(element_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - elif get_origin(field_type) == Union: - element_types = get_args(field_type) - types = [] - for element_type in element_types: - types.append(format_model_and_field_name(element_type.__name__)) - field_text = f"{indent}{field_name} ({' or '.join(types)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - else: - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - - if not documentation_with_field_description: - return field_text - - if field_description != "": - field_text += f"{indent} Description: " + field_description + "\n" - - # Check for and include field-specific examples if available - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - field_example = model.Config.json_schema_extra["example"].get(field_name) - if field_example is not None: - example_text = f"'{field_example}'" if isinstance(field_example, str) else field_example - field_text += f"{indent} Example: {example_text}\n" - - if isclass(field_type) and issubclass(field_type, BaseModel): - field_text += f"{indent} Details:\n" - for name, type_ in get_type_hints(field_type).items(): - field_text += generate_field_text(name, type_, field_type, depth + 2) - - return field_text - - -def format_multiline_description(description: str, indent_level: int) -> str: - """ - Format a multiline description with proper indentation. - - Args: - description (str): Multiline description. - indent_level (int): Indentation level. - - Returns: - str: Formatted multiline description. - """ - indent = " " * indent_level - return indent + description.replace("\n", "\n" + indent) - - -def save_gbnf_grammar_and_documentation( - grammar, documentation, grammar_file_path="./grammar.gbnf", documentation_file_path="./grammar_documentation.md" -): - """ - Save GBNF grammar and documentation to specified files. - - Args: - grammar (str): GBNF grammar string. - documentation (str): Documentation string. - grammar_file_path (str): File path to save the GBNF grammar. - documentation_file_path (str): File path to save the documentation. - - Returns: - None - """ - try: - with open(grammar_file_path, "w") as file: - file.write(grammar + get_primitive_grammar(grammar)) - print(f"Grammar successfully saved to {grammar_file_path}") - except IOError as e: - print(f"An error occurred while saving the grammar file: {e}") - - try: - with open(documentation_file_path, "w") as file: - file.write(documentation) - print(f"Documentation successfully saved to {documentation_file_path}") - except IOError as e: - print(f"An error occurred while saving the documentation file: {e}") - - -def remove_empty_lines(string): - """ - Remove empty lines from a string. - - Args: - string (str): Input string. - - Returns: - str: String with empty lines removed. 
- """ - lines = string.splitlines() - non_empty_lines = [line for line in lines if line.strip() != ""] - string_no_empty_lines = "\n".join(non_empty_lines) - return string_no_empty_lines - - -def generate_and_save_gbnf_grammar_and_documentation( - pydantic_model_list, - grammar_file_path="./generated_grammar.gbnf", - documentation_file_path="./generated_grammar_documentation.md", - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation, and save them to specified files. - - Args: - pydantic_model_list: List of Pydantic model classes. - grammar_file_path (str): File path to save the generated GBNF grammar. - documentation_file_path (str): File path to save the generated documentation. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - None - """ - documentation = generate_markdown_documentation( - pydantic_model_list, model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar) - save_gbnf_grammar_and_documentation(grammar, documentation, grammar_file_path, documentation_file_path) - - -def generate_gbnf_grammar_and_documentation( - pydantic_model_list, - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation for a list of Pydantic models. - - Args: - pydantic_model_list: List of Pydantic model classes. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - tuple: GBNF grammar string, documentation string. 
- """ - documentation = generate_markdown_documentation( - copy(pydantic_model_list), model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar + get_primitive_grammar(grammar)) - return grammar, documentation - - -def generate_gbnf_grammar_and_documentation_from_dictionaries( - dictionaries: list[dict[str, Any]], - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation from a list of dictionaries. - - Args: - dictionaries (list[dict]): List of dictionaries representing Pydantic models. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - tuple: GBNF grammar string, documentation string. - """ - pydantic_model_list = create_dynamic_models_from_dictionaries(dictionaries) - documentation = generate_markdown_documentation( - copy(pydantic_model_list), model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar + get_primitive_grammar(grammar)) - return grammar, documentation - - -def create_dynamic_model_from_function(func: Callable[..., Any]): - """ - Creates a dynamic Pydantic model from a given function's type hints and adds the function as a 'run' method. - - Args: - func (Callable): A function with type hints from which to create the model. - - Returns: - A dynamic Pydantic model class with the provided function as a 'run' method. - """ - - # Get the signature of the function - sig = inspect.signature(func) - - # Parse the docstring - assert func.__doc__ is not None - docstring = parse(func.__doc__) - - dynamic_fields = {} - param_docs = [] - for param in sig.parameters.values(): - # Exclude 'self' parameter - if param.name == "self": - continue - - # Assert that the parameter has a type annotation - if param.annotation == inspect.Parameter.empty: - raise TypeError(f"Parameter '{param.name}' in function '{func.__name__}' lacks a type annotation") - - # Find the parameter's description in the docstring - param_doc = next((d for d in docstring.params if d.arg_name == param.name), None) - - # Assert that the parameter has a description - if not param_doc or not param_doc.description: - raise ValueError( - f"Parameter '{param.name}' in function '{func.__name__}' lacks a description in the docstring") - - # Add parameter details to the schema - param_docs.append((param.name, param_doc)) - if param.default == inspect.Parameter.empty: - default_value = ... 
- else: - default_value = param.default - dynamic_fields[param.name] = ( - param.annotation if param.annotation != inspect.Parameter.empty else str, default_value) - # Creating the dynamic model - dynamic_model = create_model(f"{func.__name__}", **dynamic_fields) - - for name, param_doc in param_docs: - dynamic_model.model_fields[name].description = param_doc.description - - dynamic_model.__doc__ = docstring.short_description - - def run_method_wrapper(self): - func_args = {name: getattr(self, name) for name, _ in dynamic_fields.items()} - return func(**func_args) - - # Adding the wrapped function as a 'run' method - setattr(dynamic_model, "run", run_method_wrapper) - return dynamic_model - - -def add_run_method_to_dynamic_model(model: type[BaseModel], func: Callable[..., Any]): - """ - Add a 'run' method to a dynamic Pydantic model, using the provided function. - - Args: - model (type[BaseModel]): Dynamic Pydantic model class. - func (Callable): Function to be added as a 'run' method to the model. - - Returns: - type[BaseModel]: Pydantic model class with the added 'run' method. - """ - - def run_method_wrapper(self): - func_args = {name: getattr(self, name) for name in model.model_fields} - return func(**func_args) - - # Adding the wrapped function as a 'run' method - setattr(model, "run", run_method_wrapper) - - return model - - -def create_dynamic_models_from_dictionaries(dictionaries: list[dict[str, Any]]): - """ - Create a list of dynamic Pydantic model classes from a list of dictionaries. - - Args: - dictionaries (list[dict]): List of dictionaries representing model structures. - - Returns: - list[type[BaseModel]]: List of generated dynamic Pydantic model classes. - """ - dynamic_models = [] - for func in dictionaries: - model_name = format_model_and_field_name(func.get("name", "")) - dyn_model = convert_dictionary_to_pydantic_model(func, model_name) - dynamic_models.append(dyn_model) - return dynamic_models - - -def map_grammar_names_to_pydantic_model_class(pydantic_model_list): - output = {} - for model in pydantic_model_list: - output[format_model_and_field_name(model.__name__)] = model - - return output - - -def json_schema_to_python_types(schema): - type_map = { - "any": Any, - "string": str, - "number": float, - "integer": int, - "boolean": bool, - "array": list, - } - return type_map[schema] - - -def list_to_enum(enum_name, values): - return Enum(enum_name, {value: value for value in values}) - - -def convert_dictionary_to_pydantic_model(dictionary: dict[str, Any], model_name: str = "CustomModel") -> type[Any]: - """ - Convert a dictionary to a Pydantic model class. - - Args: - dictionary (dict): Dictionary representing the model structure. - model_name (str): Name of the generated Pydantic model. - - Returns: - type[BaseModel]: Generated Pydantic model class. - """ - fields: dict[str, Any] = {} - - if "properties" in dictionary: - for field_name, field_data in dictionary.get("properties", {}).items(): - if field_data == "object": - submodel = convert_dictionary_to_pydantic_model(dictionary, f"{model_name}_{field_name}") - fields[field_name] = (submodel, ...) - else: - field_type = field_data.get("type", "str") - - if field_data.get("enum", []): - fields[field_name] = (list_to_enum(field_name, field_data.get("enum", [])), ...) 
-                elif field_type == "array":
-                    items = field_data.get("items", {})
-                    if items != {}:
-                        array = {"properties": items}
-                        array_type = convert_dictionary_to_pydantic_model(array, f"{model_name}_{field_name}_items")
-                        fields[field_name] = (List[array_type], ...)
-                    else:
-                        fields[field_name] = (list, ...)
-                elif field_type == "object":
-                    submodel = convert_dictionary_to_pydantic_model(field_data, f"{model_name}_{field_name}")
-                    fields[field_name] = (submodel, ...)
-                elif field_type == "required":
-                    required = field_data.get("enum", [])
-                    for key, field in fields.items():
-                        if key not in required:
-                            optional_type = fields[key][0]
-                            fields[key] = (Optional[optional_type], ...)
-                else:
-                    field_type = json_schema_to_python_types(field_type)
-                    fields[field_name] = (field_type, ...)
-    if "function" in dictionary:
-        for field_name, field_data in dictionary.get("function", {}).items():
-            if field_name == "name":
-                model_name = field_data
-            elif field_name == "description":
-                fields["__doc__"] = field_data
-            elif field_name == "parameters":
-                return convert_dictionary_to_pydantic_model(field_data, f"{model_name}")
-
-    if "parameters" in dictionary:
-        field_data = {"function": dictionary}
-        return convert_dictionary_to_pydantic_model(field_data, f"{model_name}")
-    if "required" in dictionary:
-        required = dictionary.get("required", [])
-        for key, field in fields.items():
-            if key not in required:
-                optional_type = fields[key][0]
-                fields[key] = (Optional[optional_type], ...)
-    custom_model = create_model(model_name, **fields)
-    return custom_model
diff --git a/examples/pydantic_models_to_grammar_examples.py b/examples/pydantic_models_to_grammar_examples.py
deleted file mode 100755
index eb000d5cc..000000000
--- a/examples/pydantic_models_to_grammar_examples.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env python3
-
-"""Function calling example using pydantic models."""
-
-from __future__ import annotations
-
-import argparse
-import datetime
-import json
-import logging
-import textwrap
-import sys
-from enum import Enum
-from typing import Optional, Union
-
-import requests
-from pydantic import BaseModel, Field
-from pydantic_models_to_grammar import (add_run_method_to_dynamic_model, convert_dictionary_to_pydantic_model,
-                                        create_dynamic_model_from_function, generate_gbnf_grammar_and_documentation)
-
-
-def create_completion(host, prompt, gbnf_grammar):
-    """Calls the /completion API on llama-server.
-
-    See
-    https://github.com/ggerganov/llama.cpp/tree/HEAD/examples/server#api-endpoints
-    """
-    print(f" Request:\n Grammar:\n{textwrap.indent(gbnf_grammar, ' ')}\n Prompt:\n{textwrap.indent(prompt.rstrip(), ' ')}")
-    headers = {"Content-Type": "application/json"}
-    data = {"prompt": prompt, "grammar": gbnf_grammar}
-    result = requests.post(f"http://{host}/completion", headers=headers, json=data).json()
-    assert result.get("error") is None, result
-    logging.info("Result: %s", result)
-    content = result["content"]
-    print(f" Model: {result['model']}")
-    print(f" Result:\n{textwrap.indent(json.dumps(json.loads(content), indent=2), ' ')}")
-    return content
-
-
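
Note: a minimal usage sketch of the create_completion helper above, assuming a llama-server instance listening on localhost:8080; the prompt and the GBNF grammar are illustrative, not part of the original file.

    # Illustrative only: constrain the server's reply to "yes" or "no".
    yes_no_grammar = 'root ::= "yes" | "no"'
    reply = create_completion("localhost:8080", "Is 42 * 42 equal to 1764? ", yes_no_grammar)
    print(reply)  # grammar-constrained, so either "yes" or "no"
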
-# A function for the agent to send a message to the user.
-class SendMessageToUser(BaseModel):
-    """Send a message to the User."""
-    chain_of_thought: str = Field(..., description="Your chain of thought while sending the message.")
-    message: str = Field(..., description="Message you want to send to the user.")
-
-    def run(self):
-        print(f"SendMessageToUser: {self.message}")
-
-
-def example_rce(host):
-    """Minimal test case where the LLM calls an arbitrary Python function."""
-    print("- example_rce")
-    tools = [SendMessageToUser]
-    gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(
-        pydantic_model_list=tools, outer_object_name="function",
-        outer_object_content="function_parameters", model_prefix="Function", fields_prefix="Parameters")
-    system_message = "You are an advanced AI, tasked to assist the user by calling functions in JSON format. The following are the available functions and their parameters and types:\n\n" + documentation
-    user_message = "What is 42 * 42?"
-    prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
-    text = create_completion(host, prompt, gbnf_grammar)
-    json_data = json.loads(text)
-    tools_map = {tool.__name__: tool for tool in tools}
-    # This finds "SendMessageToUser":
-    tool = tools_map.get(json_data["function"])
-    if not tool:
-        print(f"Error: unknown tool {json_data['function']}")
-        return 1
-    tool(**json_data["function_parameters"]).run()
-    return 0
-
-
-# Enum for the calculator tool.
-class MathOperation(Enum):
-    ADD = "add"
-    SUBTRACT = "subtract"
-    MULTIPLY = "multiply"
-    DIVIDE = "divide"
-
-
-# Simple pydantic calculator tool for the agent that can add, subtract,
-# multiply, and divide. The docstring and the field descriptions are used in
-# the system prompt.
-class Calculator(BaseModel):
-    """Perform a math operation on two numbers."""
-    number_one: Union[int, float] = Field(..., description="First number.")
-    operation: MathOperation = Field(..., description="Math operation to perform.")
-    number_two: Union[int, float] = Field(..., description="Second number.")
-
-    def run(self):
-        if self.operation == MathOperation.ADD:
-            return self.number_one + self.number_two
-        elif self.operation == MathOperation.SUBTRACT:
-            return self.number_one - self.number_two
-        elif self.operation == MathOperation.MULTIPLY:
-            return self.number_one * self.number_two
-        elif self.operation == MathOperation.DIVIDE:
-            return self.number_one / self.number_two
-        else:
-            raise ValueError("Unknown operation.")
-
-
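
Note: a tiny self-contained check of the Calculator tool above, mirroring what example_calculator does once the model's JSON arguments have been validated into a pydantic instance; the numbers are illustrative.

    # Direct invocation, bypassing the LLM round-trip.
    calc = Calculator(number_one=42, operation=MathOperation.MULTIPLY, number_two=42)
    assert calc.run() == 1764
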
-def example_calculator(host):
-    """Have the LLM ask to get a calculation done.
-
-    Here the grammar gets generated by passing the available function models to
-    the generate_gbnf_grammar_and_documentation function, which also generates
-    documentation usable by the LLM.
-
-    pydantic_model_list is the list of pydantic models. outer_object_name is an
-    optional name for an outer object around the actual model object, like a
-    "function" object with "function_parameters" which contains the actual model
-    object; if None, no outer object will be generated. outer_object_content is
-    the name of the outer object's content.
-
-    model_prefix is the optional prefix for models in the documentation. (Default="Output Model")
-    fields_prefix is the prefix for the model fields in the documentation. (Default="Output Fields")
-    """
-    print("- example_calculator")
-    tools = [SendMessageToUser, Calculator]
-    gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(
-        pydantic_model_list=tools, outer_object_name="function",
-        outer_object_content="function_parameters", model_prefix="Function", fields_prefix="Parameters")
-    system_message = "You are an advanced AI, tasked to assist the user by calling functions in JSON format. The following are the available functions and their parameters and types:\n\n" + documentation
-    user_message1 = "What is 42 * 42?"
-    prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message1}<|im_end|>\n<|im_start|>assistant"
-    text = create_completion(host, prompt, gbnf_grammar)
-    json_data = json.loads(text)
-    expected = {
-        "function": "Calculator",
-        "function_parameters": {
-            "number_one": 42,
-            "operation": "multiply",
-            "number_two": 42
-        }
-    }
-    if json_data != expected:
-        print(" Result is not as expected!")
-    tools_map = {tool.__name__: tool for tool in tools}
-    # This finds "Calculator":
-    tool = tools_map.get(json_data["function"])
-    if not tool:
-        print(f"Error: unknown tool {json_data['function']}")
-        return 1
-    result = tool(**json_data["function_parameters"]).run()
-    print(f" Call {json_data['function']} gave result {result}")
-    return 0
-
-
-class Category(Enum):
-    """The category of the book."""
-    Fiction = "Fiction"
-    NonFiction = "Non-Fiction"
-
-
-class Book(BaseModel):
-    """Represents an entry about a book."""
-    title: str = Field(..., description="Title of the book.")
-    author: str = Field(..., description="Author of the book.")
-    published_year: Optional[int] = Field(..., description="Publishing year of the book.")
-    keywords: list[str] = Field(..., description="A list of keywords.")
-    category: Category = Field(..., description="Category of the book.")
-    summary: str = Field(..., description="Summary of the book.")
-
-
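
Note: an illustrative construction of the Book model above, with invented field values; this is the shape of the JSON object the grammar forces the LLM to emit in example_struct below.

    # pydantic validates the field types on construction.
    book = Book(
        title="The Feynman Lectures on Physics",
        author="Richard Feynman, Robert B. Leighton, Matthew Sands",
        published_year=1963,
        keywords=["physics", "textbook"],
        category=Category.NonFiction,
        summary="Introductory physics lectures delivered at Caltech.",
    )
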
-def example_struct(host):
-    """An example of structured output based on pydantic models.
-
-    The LLM will create an entry for a Book database out of an unstructured
-    text. We need no additional parameters other than our list of pydantic
-    models.
-    """
-    print("- example_struct")
-    tools = [Book]
-    gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(pydantic_model_list=tools)
-    system_message = "You are an advanced AI, tasked to create a dataset entry in JSON for a Book. The following is the expected output model:\n\n" + documentation
-    text = """The Feynman Lectures on Physics is a physics textbook based on some lectures by Richard Feynman, a Nobel laureate who has sometimes been called "The Great Explainer". The lectures were presented before undergraduate students at the California Institute of Technology (Caltech), during 1961–1963. The book's co-authors are Feynman, Robert B. Leighton, and Matthew Sands."""
-    prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant"
-    text = create_completion(host, prompt, gbnf_grammar)
-    json_data = json.loads(text)
-    # In this case, there's neither a "function" nor a "function_parameters" key.
-    # Here the result will vary based on the LLM used.
-    keys = sorted(["title", "author", "published_year", "keywords", "category", "summary"])
-    if keys != sorted(json_data.keys()):
-        print(f"Unexpected result: {sorted(json_data.keys())}")
-        return 1
-    book = Book(**json_data)
-    print(f" As a Book object: {book}")
-    return 0
-
-
-def get_current_datetime(output_format: Optional[str] = None):
-    """Get the current date and time in the given format.
-
-    Args:
-        output_format: formatting string for the date and time, defaults to '%Y-%m-%d %H:%M:%S'
-    """
-    return datetime.datetime.now().strftime(output_format or "%Y-%m-%d %H:%M:%S")
-
-
-# Example function to get the weather.
-def get_current_weather(location, unit):
-    """Get the current weather in a given location."""
-    if "London" in location:
-        return json.dumps({"location": "London", "temperature": "42", "unit": unit.value})
-    elif "New York" in location:
-        return json.dumps({"location": "New York", "temperature": "24", "unit": unit.value})
-    elif "North Pole" in location:
-        return json.dumps({"location": "North Pole", "temperature": "-42", "unit": unit.value})
-    return json.dumps({"location": location, "temperature": "unknown"})
-
-
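
Note: a sketch of how example_concurrent below turns the plain get_current_datetime function into a callable tool via create_dynamic_model_from_function (one of the helpers deleted earlier in this patch); the printed date is illustrative.

    # The helper derives a pydantic model from the signature and docstring,
    # and wires the function up as the model's .run() method.
    dt_tool = create_dynamic_model_from_function(get_current_datetime)
    print(dt_tool(output_format="%Y-%m-%d").run())  # e.g. "2024-07-26"
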
-def example_concurrent(host):
-    """An example of parallel function calling with a Python function, a pydantic
-    function model, and an OpenAI-like function definition.
-    """
-    print("- example_concurrent")
-    # Function definition in OpenAI style.
-    current_weather_tool = {
-        "type": "function",
-        "function": {
-            "name": "get_current_weather",
-            "description": "Get the current weather in a given location",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "location": {
-                        "type": "string",
-                        "description": "The city and state, e.g. San Francisco, CA",
-                    },
-                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-                },
-                "required": ["location"],
-            },
-        },
-    }
-    # Convert the OpenAI function definition into a pydantic model.
-    current_weather_tool_model = convert_dictionary_to_pydantic_model(current_weather_tool)
-    # Add the actual function to the pydantic model.
-    current_weather_tool_model = add_run_method_to_dynamic_model(current_weather_tool_model, get_current_weather)
-
-    # Convert a normal Python function to a pydantic model.
-    current_datetime_model = create_dynamic_model_from_function(get_current_datetime)
-
-    tools = [SendMessageToUser, Calculator, current_datetime_model, current_weather_tool_model]
-    gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(
-        pydantic_model_list=tools, outer_object_name="function",
-        outer_object_content="params", model_prefix="Function", fields_prefix="Parameters", list_of_outputs=True)
-    system_message = "You are an advanced AI assistant. You are interacting with the user and with your environment by calling functions. You call functions by writing JSON objects, which represent specific function calls.\nBelow is a list of your available function calls:\n\n" + documentation
-    text = """Get the date and time, get the current weather in celsius in London and solve the following calculation: 42 * 42"""
-    prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant"
-    text = create_completion(host, prompt, gbnf_grammar)
-    json_data = json.loads(text)
-    expected = [
-        {
-            "function": "get_current_datetime",
-            "params": {
-                "output_format": "%Y-%m-%d %H:%M:%S"
-            }
-        },
-        {
-            "function": "get_current_weather",
-            "params": {
-                "location": "London",
-                "unit": "celsius"
-            }
-        },
-        {
-            "function": "Calculator",
-            "params": {
-                "number_one": 42,
-                "operation": "multiply",
-                "number_two": 42
-            }
-        }
-    ]
-    res = 0
-    if json_data != expected:
-        print(" Result is not as expected!")
-        print(" This can happen on highly quantized models")
-        res = 1
-    tools_map = {tool.__name__: tool for tool in tools}
-    for call in json_data:
-        tool = tools_map.get(call["function"])
-        if not tool:
-            print(f"Error: unknown tool {call['function']}")
-            return 1
-        result = tool(**call["params"]).run()
-        print(f" Call {call['function']} returned {result}")
-    # Should output something like this:
-    # Call get_current_datetime returned 2024-07-15 09:50:38
-    # Call get_current_weather returned {"location": "London", "temperature": "42", "unit": "celsius"}
-    # Call Calculator returned 1764
-    return res
-
-
-def main():
-    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
-    parser.add_argument("--host", default="localhost:8080", help="llama.cpp server")
-    parser.add_argument("-v", "--verbose", action="store_true", help="enables logging")
-    args = parser.parse_args()
-    logging.basicConfig(level=logging.INFO if args.verbose else logging.ERROR)
-    ret = 0
-    # Comment out the calls below to run only the examples you want.
-    ret = ret or example_rce(args.host)
-    ret = ret or example_calculator(args.host)
-    ret = ret or example_struct(args.host)
-    ret = ret or example_concurrent(args.host)
-    return ret
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/examples/reason-act.sh b/examples/reason-act.sh
deleted file mode 100755
index 06d592799..000000000
--- a/examples/reason-act.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-cd `dirname $0`
-cd ..
-
-# get the -m model parameter, otherwise defer to the default
-if [ "$1" == "-m" ]; then
-  MODEL="-m $2 "
-fi
-
-./llama-cli $MODEL --color \
-    -f ./prompts/reason-act.txt \
-    -i --interactive-first \
-    --top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7 -c 2048 \
-    -r "Question:" -r "Observation:" --in-prefix " " \
-    -n -1
diff --git a/examples/regex_to_grammar.py b/examples/regex_to_grammar.py
deleted file mode 100644
index 5cd9210a4..000000000
--- a/examples/regex_to_grammar.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import json, subprocess, sys, os
-
-assert len(sys.argv) >= 2
-[_, pattern, *rest] = sys.argv
-
-print(subprocess.check_output(
-    [
-        "python",
-        os.path.join(
-            os.path.dirname(os.path.realpath(__file__)),
-            "json_schema_to_grammar.py"),
-        *rest,
-        "-",
-        "--raw-pattern",
-    ],
-    text=True,
-    input=json.dumps({
-        "type": "string",
-        "pattern": pattern,
-    }, indent=2)))
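
Note: regex_to_grammar.py above is a thin shim; it only wraps the given regex in a JSON Schema string constraint before delegating to json_schema_to_grammar.py. A sketch of the payload it pipes to that script's stdin, for a hypothetical pattern "[0-9]+":

    import json
    # The exact string the subprocess receives as input.
    payload = json.dumps({"type": "string", "pattern": "[0-9]+"}, indent=2)
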
diff --git a/examples/server-llama2-13B.sh b/examples/server-llama2-13B.sh
deleted file mode 100755
index 4ce79b7fa..000000000
--- a/examples/server-llama2-13B.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-set -e
-
-cd "$(dirname "$0")/.." || exit
-
-# Specify the model you want to use here:
-MODEL="${MODEL:-./models/llama-2-13b-chat.ggmlv3.q5_K_M.bin}"
-PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat-system.txt}
-
-# Adjust to the number of CPU cores you want to use.
-N_THREAD="${N_THREAD:-12}"
-
-# Note: you can also override the generation options by specifying them on the command line:
-GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 4096 --batch-size 1024}"
-
-
-# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
-./llama-server $GEN_OPTIONS \
-  --model "$MODEL" \
-  --threads "$N_THREAD" \
-  --rope-freq-scale 1.0 \
-  "$@"
-
-# I used this to test the model with mps, but omitted it from the general-purpose script. If you want to use it, just specify it on the command line.
-# -ngl 1 \
diff --git a/examples/server_embd.py b/examples/server_embd.py
deleted file mode 100644
index 0e34c6cea..000000000
--- a/examples/server_embd.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import asyncio
-import asyncio.threads
-import requests
-import numpy as np
-
-
-n = 8
-
-result = []
-
-async def requests_post_async(*args, **kwargs):
-    return await asyncio.threads.to_thread(requests.post, *args, **kwargs)
-
-async def main():
-    model_url = "http://127.0.0.1:6900"
-    responses: list[requests.Response] = await asyncio.gather(*[requests_post_async(
-        url=f"{model_url}/embedding",
-        json={"content": str(0) * 1024}
-    ) for i in range(n)])
-
-    for response in responses:
-        embedding = response.json()["embedding"]
-        print(embedding[-8:])
-        result.append(embedding)
-
-asyncio.run(main())
-
-# Compute the pairwise cosine similarity between the returned embeddings.
-
-for i in range(n-1):
-    for j in range(i+1, n):
-        embedding1 = np.array(result[i])
-        embedding2 = np.array(result[j])
-        similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
-        print(f"Similarity between {i} and {j}: {similarity:.2f}")
diff --git a/examples/ts-type-to-grammar.sh b/examples/ts-type-to-grammar.sh
deleted file mode 100755
index 9abba2a3d..000000000
--- a/examples/ts-type-to-grammar.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#
-# ./examples/ts-type-to-grammar.sh "{a:string,b:string,c?:string}"
-# python examples/json_schema_to_grammar.py https://json.schemastore.org/tsconfig.json
-#
-set -euo pipefail
-
-readonly type="$1"
-
-# Create a temporary directory
-TMPDIR=""
-trap 'rm -fR "$TMPDIR"' EXIT
-TMPDIR=$(mktemp -d)
-
-DTS_FILE="$TMPDIR/type.d.ts"
-SCHEMA_FILE="$TMPDIR/schema.json"
-
-echo "export type MyType = $type" > "$DTS_FILE"
-
-# This is a fork of typescript-json-schema, actively maintained as of March 2024:
-# https://github.com/vega/ts-json-schema-generator
-npx ts-json-schema-generator --unstable --no-top-ref --path "$DTS_FILE" --type MyType -e none > "$SCHEMA_FILE"
-
-# Alternative, not actively maintained as of March 2024:
-# https://github.com/YousefED/typescript-json-schema
-# npx typescript-json-schema --defaultProps --required "$DTS_FILE" MyType | tee "$SCHEMA_FILE" >&2
-
-./examples/json_schema_to_grammar.py "$SCHEMA_FILE"
diff --git a/include/llama.h b/include/llama.h
index 413070d95..ff95f6929 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -1163,7 +1163,7 @@ extern "C" {
     // Performance information
     LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
 
-    LLAMA_API void llama_print_timings(struct llama_context * ctx);
+    LLAMA_API void antigma_print_timings(struct llama_context * ctx);
     LLAMA_API void llama_reset_timings(struct llama_context * ctx);
 
     // Print system information
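
Note: the similarity printed by server_embd.py above is plain cosine similarity, cos(theta) = (a . b) / (|a| |b|); a tiny self-contained check with illustrative vectors, not part of the patch:

    import numpy as np
    a = np.array([1.0, 0.0])
    b = np.array([1.0, 1.0])
    print(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))  # ~0.71
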
diff --git a/src/llama.cpp b/src/llama.cpp
index da7bcb113..8016ae981 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -19097,17 +19097,17 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
     return result;
 }
 
-void llama_print_timings(struct llama_context * ctx) {
+void antigma_print_timings(struct llama_context * ctx) {
     const llama_timings timings = llama_get_timings(ctx);
 
     LLAMA_LOG_INFO("\n");
-    // LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
-    // LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
-    //         __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-    // LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
-    //         __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-    // LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
-    //         __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
+    LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
+    LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
+    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
+    LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
     LLAMA_LOG_INFO("Antigma timer: total time = %10.2f ms / %5d tokens\n", (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
 }
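
Note: a worked example of the throughput arithmetic used in the re-enabled log lines above, with invented numbers; not part of the patch.

    t_eval_ms, n_eval = 5000.0, 100   # 5 s spent decoding 100 tokens
    print(t_eval_ms / n_eval)         # 50.0 ms per token
    print(1e3 / t_eval_ms * n_eval)   # 20.0 tokens per second
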

zYs9y5LCMnzGlk{gVRO<6NlT0``G(tw&1fVA@AykY^hIr2jw~Ss%)@=Mn)tYGhO8XzfKvK~2Q3pDkt$Cvy z$#|nuez-$A67+4eYuw3XC=8v0euJLShO~2gN{2`=c_Q@zG3{F#$8YzV@G!>J5Nn|l ztF~G{nX}22E#)4imy4bdxP&V?BThUIpR%+@M#7Ome|^7N>BS5*w992llkE2A#vZ0f+8 zowZR*9oIDVXI7>Dc7zaZ+NgvD6M!qmmP?Pb2h&Gsc9ERUqEQQ7kvHHM4K;RF`&q)f zXd{c$?%i*%SLn&nr8&Q2*>vA z8pW@MM?I43amouhC0rvtsX9iV=XIi`@PE*M%e;K5qg4~_nw@%ZE~bD#8C{O8((~Ip zUO0BtgWPhd^{0z+G7a`g?j6vlPx#A-LDtQ41PT0~$nJ1dm?7^zM+iyu9RPe)frn~v z^yHmig!L*qN6B|xxmOy(EWq(yw&Qm6)=Fv{;%-w!S~-UK8ZJJ284tHoo{c1R`sn;n zwXIw|g#4NobZ=e|OUT6+hA>YAO`|rJw&BTYQOxtRhkLH=YNcbynFs63U}Ttm*A|Wu z!I`S`qLgP7mM{2F8u7+Sdh=sw@ATu(*f>(R>!-zbbq{z>Skgb%dh&wgphfO-ksdln zdpuHLIcqRv!d4h&Urq6&Czp?lIZHxBh8c3mpkn5>H*o>>5hoqGO9J;eoo-6%@fU&85-m2-1s)mX09UqNxQuYtH~pFS(@~a9OR(3p9St80 z61;IG?J*Z<9?CE9xvfWeoK=wAxVnlgoCkX?&2Z&$Ac(8-+jqw(Thks3?o#5Dk{`H@ z<;YMP(YP5zJ=z1S-Jh*Iyvg9Ri$&Uc{TUA0yvJ*=`c>VKO%Arkk};8O1YDUZC>LA! zcYH-sKBj2T*(HR3j9gkM6>9#3;*Oq|yIaG3&o|I+4cU^HBepK_dP;>{5j`Qyn;dh*Pj|;d5=Yux_?q4H_RHqz zYL~ZDK2?TL4LK1z+UJuW7!58{e*AAx@O6h3yE|IhoMY`9D?=g^EhA+Ox za2lo8-zc;}DEKvX^Ff`LoeK{CK-Y+bVQkRNE83&UjH?>>S?X)VxINANRTg&_?_SWnR(-E1&`@XBQWrTpJ>chWn>^swQgxUc*MqJLNbv;}aUExpCB>9Fb@AsyA&h;?-5CUfF==!(IyQ5*Wwk0pQQWFCfqtt8v} zr}iGNL((kCMq@J0s6yGp8mC$AfZb{+haSfh8$tWttT!#Q38NV%)lWs*lW{_FxL}b( zz{p5;zxEf=CI&l`3`7|;iW2f<8U|WV0((($)0#S|H=bPC{Xt>GdxklXmMjy>i*GzrxeV1#~4yoH3KVI91GJb z^ip^W=i4tER4ve$Gp(>u3+{MN`+4$>C25Qgpo z14E=Tc*>MP8;FBD#1}5iV|#A{=auR=?`kA1=j>Pz6Pyi(#CXElXv=#O?dH5oI5zjR^6^yp+_J{Yb>N{C zB9l3PhE`)eer#RvkB6=??TdXU6{!lXKc>vXu52r-AESSEL_$}V*Xc!oWRN56>Ix=F zlJ%j9%3hBDT3A2Yepy7jK2>GS^V*R_-)#C!$@dE{zN}?lkKU5Spb(o815(ABJneCI zp$$BC&5(nkA?BwTw{hlnogyyvY9EC_*NC$1upjfv;G)mk>xLI72F!dI9ThkMS(AH4 zm_J^R3oD;DXQ-mqg`me%^tsK5+=l)K2n?!itDYKHlbavW_NEJe%F;NjS!pB-RkqEW z5H_ahjoh{5h|X6hRJ4Z{ijkh{vx*MNz~|a0ieke!aql@`zfk}1Rnd`F z=JzQh=`U4@LrP_PF`G$Il*(vi{$3ScN=g;(pT?yU%Ehe4ZNA40Ptn&X;hQZj8};66 zK$d?r-~fefr6bz*4=!{usm!M zewe^dBjSy;Ng?pbv*AklZRwo6J(+>Lha7LpPM2hqql^6JXIbSO(u<4?t&bjEF6Ee| zWVdFT_1E+XC+}C)zm=0ciGwH?feg^=bVTpm5odA7QasX7k}&MFgzbYqy7zvJvJiG* z>pShKHDf&e#rY)+eXi?N^rqzVmC(NspwA?f$jD|(uZ9M)PWq22*y1Uup@n;O&+K1vJsIBh%JT0?EaQsq+41#>e7l26=l_~V{FX9!!2AwCY|^qG#fNog3HT{c`a z9CRjULuO~Lzvu=!bA`?R!haJ*3}vjj_x7*8qFeLLHqmZ{esk>7rHkzJPZFGv?Y><@ z*?aoeteEr{{ge3V6Y#N-^}$-Je64;8dSZ#ypxaBLINNc*2fPJ&AB^{D=P_45Y7fhd z&XAlkIm1Y|WmwJ&P@LX^y+3yrq?|XdqI@Ua)f%gAnfjh1K`3sm!)xE5&iCT@_}R=? z3u!Vw^WRx8quu8p`ZFZ0mn~+L=zJ?c;R?1Q24hP z)`GQO^4H1qxAh?Ti;?V*>T~L@?3gM{e4ZtaouyQWEjOEV)7ZCaI3K-(&`IIH!zcfU zX0r;MNb|;KVwQ~|lY-aQn^b>08WexR_R{y_kE%>h0Th3ivCg+5rPCGr-BI`A?*l0Q zC?T5@<`eZZV^wSEZ6&_a$+ANTVi#S)aM|phX(j=IN|W}n$SRFMvY<}5*5<_&yQ#-0 ze3(f$=%x6874>LkA5tMSb>BML zEf@V}OY*pe-Xrq4)EC5=6QrzlNObEK;Z_#yadhn!Jk@?xzTQy2nKxaU5r4>}%V`Fg z+;2H|$g!*3MzC((OXgq_e?+S>vvq`**YCipOCy;*>R$Q%y#24ruj^HuzOr=t2e#`7 zwW{Xl+m!2*?_>N!ol{eGi-~zKE)feilgb=6o_+wLyQ@_5VpK#*W%b6-W;hhnv(3c>*N1V`O~XPAhi! 
zOEnz*+gGpU7+rdP@wgvNIJu>tA3kR7oo_6g5u)}~ z?{+Pwz3IUW_S%Y4`?g!6jJfXiNz2DBTCvohU4ANetZr~R6^Z3|%UnWk-QfZChB-I} zN82O|cblTB8rcZz_bLqIr=Io?dvXM54uXb#C+8h8ts6(;UoUw#Gf^dbU53hdg> zKzpp)98gz4w>VT@Dm>0ryZh7tKYNvS1!R5mkqqI{g7y-IcR)8qZIX50f?h^G?TDU~GuC^q;SAD;)e?5#UM>*VdW3v_L$8#HN(lGAq%s2)=4DB?Ml2e0lepuOkzX%c+2rD`U$*j1}HEO$ui4HGs|LzcC+FA%%}| zmd0s6-C5FgT=Z=L0&&zjWI_u~(zAp4kL08GWK(o<&1(NxO3t?5gmhMTz?|6Nik*Z& zZ*1L%82ITE68^A{M^3U;pt#gY#%W)j`1jkt9iQU|5f~veH9FYge)N>kP=(kinR=nm z!E1*RvF1yKyyO1}q4ghwGbz9wkHW6r&TX4?j`;jM!1 zjI(*_HT1k!RCd#!75^~4^l2AMq>goQs;7elQ)6|lJ8GjVAzj5YYzvfihoW>x$rZK2 z{c@2jILRYW@yRv8m=C@M3uLx4-=6*Q#r1&P$kdM4apUTwJc zIrdHE5wz8b%8p-6lngXxrQQ~-Vaz9`U#lkeH#CU4B~VM6nO$fSYLi8-Ppn5a<#<*1 zF84I(3A+r4l>f5=k+WX{pqvtShwx&tyCZ3r3_-0HKI2~J;C=9WN7eJulS1mF`9MFPA zVOzUS7S&!w@+zdCX43$6pc#E@jaEb2@~)m+!lf$9`A;1SwPfKp$S!FVXa~MO->(_E z$O$VJ{!-Z_W4SMHl)(O>v|Ap(zKluL9@p|nelLe zvTZl$581;}?iE}9%CHh!Ifj9ggvb&Fw|F%y`>OP^%0;D!vwjWm(c3=`&6}Qf_$j=D`BoH%O6k;qwRmv^-FWbaqzcVA>^|0fH5= zadl%XuNWnY-g)N3ETmcaeQ>@RJYs5?H|Y%Mu=fWurqCj3R!GpkMVBmQ#Hz~T-ELo{epavgdu^+DQlpUH0TH|6n4EUN`X*(jt5zEI z1vuU!y7S2c25WG#8?PK}eFgyYR;3P*$P9c&P8!FB-@h*GJ3ft>tGg8fn^2jEoeHFk z3fc6#mBu7S=m6Y$p_$L0=@ZlXf;aVsAHBdd@%20$SPOjO)x}Tlv8yyo#%P%9JWj|T z!%K5YT*y=1{(zQ!H(LQ1Nas!vf9S%LRg$Z_^ZA`^f6u!Zt}f&|!0nu;JtdSl49IZu zxB2c=f#atN8NQnC7P@vB+h(B$8@ntsf#6igRAa4Uzk7f8YG7@sV8pIEN!;4*GlY>4 zZS+BewY7eJuz=#m91tYq95rwKzGr$OLh7w!4);v2zlr=enn?JeO7j#C#_r)hbA(EE zoD}{r7KMM%*ZYEit=IS$vi+R(<1 zsj~8JyfK5m0g*w>ESrk*2X&Kd`yr^f-*@AeF>NRCS4Aty%04fBDa+@K1AQL#>Ao@* zdSyYxz594 zD*Ju5e@mbkGo?j`;$bN=Y1B^N8VE(x^Y_sv)M@khH&GUN^5SAr4Zd zie55q#P!6MT1b4ymL@#D1q&)mI zK1`3CKr(8IqQn>P*pyGw126=m*69Q3Qe?Q(5ECojEJDd3y|S$4t$^phr3h$E|WD-MM5VWF^F3GgnwdB0rs(<5EBq4cmL$; z(b%W46iAS(E>53~_u0;E>F*5Jk&_$@d1`cQ$YAzxyo|irs0aTYKp&kK zcFGTqRY;BwNaoST$r{;6-z&T}4-Fh3zvb_get94D}WEKmP7!d$ozI8Jj~bKCrzBCfg7D`4|A;e&qo<&T~^YQ+9hdRw*tCX-+;+-SJ5C@Ue1p@(Qr>%kc1k|K*jI z;^O4t;N{|D6?n#XKZl{^-OSwz@K8ZUUIsuw0024gA8@w{h)a7}zXkv$C4dQx84W;q zYYiaZlOJ#;1y=w-%t8F;6$JAfq<_Ld|NWbH`vAYHm7|-ZtCgb@H5c16fL~Hx3Hd$- zaQPFa`xAUPK@l|@3sfU-2R=yKgGmP5tsyeXOGz24s;kJzzmWd3QHaE*PEL0BU$%E} zb5WO-q`sdP5AqY-n)~em7gIA=Cka*67x&?R{@#cGy_`?|_1SUO`?&UQ^J4fzWxMTb z+f;n!h}r-Bx&LG6KPld`zbPII_(|2jDV`q~{)ghl-G~3Cc=`9?SAP+`#y_9YRF?t| z;&(7iZDwU^4uuq01FuA1fGE_ z=lyX8zY8wslYbvSmVdt%vJ3zs9pE&DE&qN^p#uP_Kn{xD^zYXg!D%&dF#w>y$;s5k z^iOn1;BUm&U<&(P1OQks0RVp#{EoisYc9?|?;~JJ0szX=-Q5X206-D};5P2=?)u~1 z-EAHKAT0nur^8(*fQtt7BbXy0JOL1K5s+{Z?wJ~Rq)`51YJVaEfBhGx_7FfsKte)9 zMnXZkPml=y;QIhFE(+db4hd9zHB-nFX9CWk_*^s^$*L|w^&dyHT(4Y$AD}-XA|@fF zqi0}z%EZmX%f~MuDD_-gMpjNH2K&eg$j zfb6FN1a`Afj>BRrVT!$1j)AR8EBlgjOY^#8?yMRkziHo_r#&CVw`WQqmLox9mrt28I@hTwg2>Z@SGJEy`n-2J9muEMsKA$F6x|u{-v zf!vB)e7+^B1#J>WSJ!s-O80qiisdm?Up=gGF3%6ecpgvf_=Rt%3WlWN5w{+bykbpHHEr+Wm` z`n=h=*PiISV06D#(dKAD+V@ce`SjC5S&lfgo7yDhPoF%4u~$K(GO4%zRg}1P`ti}% zr99h51ug|^wchcT@v`bwiEV*>s?0te-(1(GLe`D(Zwz65&esZF>)ioZw*{uK5@u^p zzGmn<_JGH|$BVMJ8O?~>dvm_l!*9x%p76)UIhj_T@xTJw8C4K|MXX1t`&;SiV@b-5 z_QtwzvS)mIn~bW|gWVko>=yqY3Jf?cbfp4%L5&zvHl^GCvZ7 zqq9LT1v}E8c?}dS$DjaEMd&eM!wn%><;)Fk>@~8%9q`+o>X@+dhL9XOSQK<}2bkPP zQu#C1(^-K`#0u$3{|*Q*?^r3`D##ATx&3kF;h)%f2f!Gc?tqOQxK-CW zeQE$d0m3bqUwgEdk2D=R%#LKl{@RNPXpeTsktT@8*s>rax50mHmdN5)MmwHq z0?y(doR^gnfF^c<_9sTMHD+TN6R-2R3b@ZJAHiX~5=$@hAW`XtuoYFvX#+T%yFRo_ zt@$>#ix})gCp`ipC9t)Ms|Ox*wZ5BXFDkL~zhZ)0U3e1Y)=UM86?DKN=i%rvJ}+U^ zcfi7v`$)mA)8XaNmEzgHSz#P-BmUa|C`wLia2R%Rs@7c;uW`{`KnwGi{bSmw@KL$|G9rv$LT9S7_H`=q3>q z=-89}(Q%s0eEK>No$({+w|@Lp*}!nl!73&=FrZVcda1$esOY@&*yDYn;m%a{9F1~o zRpW%V&|~(*^TFDNhDJqwWA43c>5KC0zXTO9|6?UX>A>KUB$sLD#^etP%<4m>aVr 
zKIsX2_gGv%u|yTH$B@w@vsYJ{9dmmP?;pza>WaG030#%S4uCzyR4*C zNH*+wD3}eVQj$BsqB5!jx9$+hk86TE)3Ha7H734%$J)`cDP{a+r2Oe%9!C^y@EW&g zbn;A~K(7om$zB$YW_Yc&>?LamBg(lE`@wCDO`*R|yX{I@8#b;H`&F9;j{7vfUY%CT zQt<1eS%S3IDhBG4Vc0kvZ4lf!{HYt38W?leQ~=2+YUa_}xbBfwR)g8&sV7HEA3n|Q z@WSnyYMdz+C-@>Lh99Ro4=fYs;Or}JeK8_zoi;s|{`uaTt;nw6af_Fk^}5CcZGmU8 zXyCMjG1%dwizfi(M(;Mg!*t{=6ba71L!=iQ(JEHNO{C--(428psze(H!s@T+Bj;8x zA7j%Xp@9uNvkM-8Ku!9;t_M>vOU;Hn3u*ijNw*wLNWt zKR5=nN6KwAn8@>+=b>LBMyCe5d8;pV+UX|W>2Jq9yY8!R*%ndRniLSQnA0+I-oAE;#c!>=viq87A)nPzmVGJ zxdl{hjV??C!XzB^VRgdWrSpx1HzPR9X1^+5ynUIx1Mlx?`vscBlc_Qw_sfY;noqQ>v+p_k}B|4 zxglWO0EI3MS{lyfzZM8*sQsYXv~Rqt1fF-T+nCzynLPM|@uTb2ky&jlU;V1s9>LSJ z?tx+(s~W+k-RrdPPB;=2KtSvbC(P{VdSxWgtunL&z1G(dse6v1SxtL8)zNQ-dTR1<#9;2_K-8Tc0WEG z4?|9e)sc}H%PsOjL*gN?aA7}_&duCLj4quOgLcewx|hTczkPF(fW%3x#7x4M>fm}* zN6tihLHY3hjw7&vg$_HIg?u|rY3 z#a|7cq|@P>6T)pW<65#`>uPz0puPkVgYsFNE;?2?my=*_wmG(pxDhD#V&4nkY&+^r z;U03)sU^*-Tpd)r{;Ka-LtHmR!C>Xi?EI4_vtY7y>f-UntEkZ+<2r6t$O--m`@Hda z%~3;4UCXVJl5O{uOsQ}FSNC$SZ^P%8eq3h0*5+eF?xyy1ZMp6l-k4^(2~Q7RE`-PR z=~2~C-4GC+udCqR(8DZ>btTp)#ClSn*k-C|)V2isJ5VV0%+-}I)Jv{8*3Qco&U8C+ zt8r`CMUN_P?%lti7mgzB z!^)|QKJ~TtU^e7@Nr_)Fv#ZD(;ur9PI;z{*-YQt<_fP!q(X?{yU>BJV8}@m~dHZc< z0RN5lxF7SU`5hk(*t_mW^%&5?wfXL5>GN1u;ptwTrr@XRjUDy+N+YyZi2?%)YB-1G zV0!-Q_Yy1~-olsR+~DO)CC;3L{R+dot~lHJIe7L*8zO1Om4E$0v5I~n@G|nf{in+XRAq!-B|9~7 z6oUW0Qf+#JxtbhoHkt>p`*Vxka_Ua65e6)R#Zn(@EQ}@f>)~|}^u8#0$TjRs3dB*a zMZWWNR%}Fw1OjBZ6x9ybBmcZ{x*S4DQFr{d7o4|M;o?@Nw8DDI1==Js;oR_R5mljak0e@bs-itiQcq$DzJLb?l_=ZCN{& z*SIV!euWihjp1!R-uBg5XnmgDjtePSaf)ugNS8JN!t{^73Issss14kWXqb-jyRTx1 ztxl0B3sZB8Aq}qNjjLqpa@h)UVLB6`Sfq|Me+Oys?vm)C`Z}d_46S~ z_A;xl!l|U{Cu2uPA;)_-&eGB?jRgzf=rv+ABn8D6$6radM4|yXaIjk+Qm}O-VM6dH zYgfW(iEciwui*SEYfo=c-QYpsYL5SrwPhLq%GwkEuVp$j6vn>?mgx+~Ue{K}SLRif z-B-8mVD%h}Ew)k(C2CJ6g1=|6J-QIys>*r5wG>c!-sYpWyxdM$2bh zPhV++r?hJMNL7UFJo`4n9{Sqxkx|KU!xzSJWvhJ0C4*nzRc0IrDCNSTdF`Ux&lwKE zOGEDc9#>;^crUb`s24h;a~?X{0oKbwI?J!8+^l&Ak>6j7oaMjqK2JsGXnv|`a-k7s zz!@S=wKpOiU=Ho#6`7V3Y}iK;E`pw3c9h^?%JbL>yK7*=1L-q`==$y4}0_IvSc5NU!?<< zv8|q~RG~{~lkGIHD>y6r^U-qp_V$*{;6AKYG$Ywpz9^90QJqWhhEA6P)Gk`*{NlFm z#{F8^h$U{uRwqj)(ed$R&e%SIlqJcj8R=W++jE;+FcAe`Lc4sxCak^H0o2@hmh!+# zlxS`kyQ=i&hiDs%QgRFrzeTkSLZBUqE~2<90$j`WK3gP;{3^b{9b$7`3ZwupfX2Ck znif~n>8nn0ca1Vs6OF%FCta`SQd->D@=XN%|t4+*Yje}wd}ia^%tuhcU6 z|D0N=VEor`4d!F$cJ7}{i&JCVuzci#09OAxYZv3-^~e3)1ImN`e(+M|>|@@rK*6#& z&WMk}#Pg%4p62GNNWXo4sBMvt9a7dJ%y<#?!3AK(Yb|1we!NrcZdkS|&UAI>*g1PiH`G%NZTeQHs%ba%?DnHuV|HkoTFy7%Cs za;u@c{n4>Xlp~SKQP_LWQUi|8m~Ri*$P-8Hqjjez+aH04-y>jIlt0kW>wmHLCh$;p z{r~tNdk7)x5JIwVSu&Q8BvO%e5?MoqjEosklrsq zgN0RJQZ>hw4(OX0i}*NaP07)Me2H_f7T>Uk4S_ht(&d!hsTQ?bAy|5fpNQKNokQKU zBfg(jL{WY4D&{s0EdIG32aW?GCtj0VIOSL_it?5`(KQ^Q7j*T zNd6Oe-q80JFjorSg4}UH0_ms_Fx`UmpDKU9`NIsoHT-9re?Nw#%|JV;^q1yWgV(|E zm$^an!@+XzxAqtH-){e?$rHyW4f^Iym&d?V^`*X4ioGq%sfJfAr%&)!0#}b_%K;e<0 zaV%1^F=zc`EJw8pt0shuk}S*bOE(fP(*#c%iSSL@!(WDai1Vyll96}l21LU#l00v5gL6k*`R zVMi-9qtvgQa(dPqwa>Q#;zI7=34gSAuHN44`IuRbd*HI~#zHJ)4HkoCY~n)h8%RAu za-4ORY)h+%DCWO^p79Yx_uTFiF89GGa$3~PVn8F#wqmFhSAVUAL*!LizF1B%J%r`e_{>YjH*&i%oY)yyP zI$_Ub5~lW9_shiSS+^uNT#vF&Fg!WRnkPBbfxdXknwj*B_8tS&;}RWNl5*q)$&zeJ z0`tz+t4kFJBZ;a{R2h;~SxwzC)W+sK#JaXziz*e;Mp2(JJ&N1 z{4HstCh_yf>cP{dqaV9i#ak^VIPOBAG51$2GU7shkzue9xV^4E)xMK*>fc#p^LAhdR8V2=aK zlOytLVKf%dS7j@xz-+Z>@*cfybBz9Vx%w->_0IxmTM#)QINH0nAf2C@l*84Pwr8HR zomN+=ppK)GGog5UOgz+*eG78ziU!Fq1A@@MyKV5sd)Kxt34^65-r^S0+f#^I*`cpE zROasvMZ}%p$oC1qV{`<#=NS3^Xt}u;?Eu632T%kUWC-+OYYnks+Wsx2&d%m}(YpTxA?=>AQ?v&kYF%D3h=ioOTMD(Tb@D41q z5C6%>?kNiXUAf;*DxcSHrSD&KBfL5yA>gLTzD`s}cIwA2k8X@UJ%tNx$7l 
zBKZc4Sk^hYOH9h1n>A(N_|1R4MY<1t_qgZE}4B{AFw~Rvt z1pUDWvdMHNFKb`)`JIVu&-5a>b^vGU{3{bO%~wNOvEwVaq_%Jo#>#gT`x1MD;&&z- zAD%{pF*cX415Fos0|;~r_Ln6agDm`dv~nzY8ix3q6x00*Y_|=D7kC7<^lS?aiT&mEDfz@nA@uDl; z!Fl;hKKYs@S)nlW7EHn>fIc2P!b0oy+k)tgtU_na#{AHqGib(?{Qzq6nwn6jgN~JA zK#Eb)^Ks96sjGbZ@Unft^pvzXj9u$ROVD#p?*H+V#dKu%PfLe`5svwRNCoXA2|(e1NT-yQ)tz1c4s=5P_Y zK_lt{BbNLYB5wN!C&xH4wpErh@m^A>imQL%^RjW6@Y(GgWB3VKzz5Zs{Q~ zX2_7@$VVu2__FjT2#a>(qhyYb&I(Uod1|^vb#;1>YF%)9#X&)lYsoF;J!sMmuvXf0 zl>|Ku2GS_d>*ns#^4G1Izh6MND}mt2&^p4R%q(}_0dc16&| z&-DG_h42M1JfPFMC`Q0E${4=|$^E^UxG%um=>nUnh$+AJdja7KR)2UN%o+dk{M?;v z0z2lvmN3z7^Ueta=*J51U5)7f@>%sToB&wNyMCSd{d>UEsqg>rH2M#fAVV=)`Rfvb ziY(aumx}zucPS?QnOZ-Bo*6-t4*0`9{HXM2KbmMe=MF6XctDgsjQS9LcI7&p9<|@m z%oAtDimyq2XZ-742;%p6X8~ z-cKgpPbS_^CZ055n`HQdWBAWe3?u&~7ymz*cz@LRKbd$xnRq{$c>h0O;{CDH|1;OW zEdhVBrhc-fezK;1vZnqKYwGVYss6h(^wys=f&4Sd>W|O-B#itdjQp<=M*bd&n1i9dD8n)?^F93XG0%9|FZmD z+RQy6zQHzM*BE8IcYMM`Dv_>YDZ5`i$WF@A?V?66ZyWd^=yE?knh_lTt(+v6i2m@B z1WaAbA!y3wRGJvzJu<3@qVy5Jr2*u&9`GO}fZdZ3z(JAswjl431T2m7M_y3;H?Y$o zEpQ9MzS0GZgU_b0TM!Yz^;>>~d1XNptKQD8kCKiN1xDdzkS8z;whe>pLdo*3zTXqA znKwZYm=AjaT>_0i3Efo60w&g$A9>~dQYO{6=-F`63X~`e43=|u=x8qo?Y^^uyf8C` z+FOv6jX4ZObZQGicwG%J1xCQph6X z{;R$1Px2D}x|&6zyk@Z-gqRK`k$ETw3VO!k>DT&ilbZbe z7diT^F6mxR*d12!^&O`rIxmF63!`yVQBzG=;Zdn{g%D{Q^RuH8jT%bjZ6fD8PSy#_yjcyf3|1`$H&gQ_u{yAzFxXG zyi4WN@@vyK2|L_rM41DxFsc>S1`wJ-?TI=!4WqYP5G$^Rz37XJo!Zs-r zp$7{qnUIga79wxv%{w==<83oh+-}pw?mHl7w>*bcUAcxApt0S^L%4OQmA*`Gx7@$d zc1!a1Zs!*kan_(~9g>Ra5nGn0>OF>(g>_8xy=x4db+&48k*&3OsbyE4@p_q+$@Yk+ zvB=HlZ#zp2%c2`saGyK4Sdcs+@M_d3_t9~8|0d@fD0KwAa+hz8T+wlrtJCUcEnGlf2~mtI zU06(SAAh<0w1+wnqT8F)c16Ay1l-6-*I=>~P5}LinZi9lJK;+RbaA zE}&Zg^7N4pQ@I5JOtBx$nEH;+1Q@=z{v=5LH!Wt3nOR8%4KuRcFyp^;r?ROm4)miC ziYVz4K(W8$nW=Y#t*9Ccr)j}qYaI~F`t)$)`c#xGgYV&y;3N)T+bl`^x4{}MBClDX z-zA3KX@ZZ=&hJ(JXb({7FD7WzHr`rJ(lMSfOQbLtB4vPvxj9vPT@UWc1N36+@!NX! zF8RC9T3nQ{4YThfP@?x~pY48_M^Xgb-eMfO6SK#W0t2DBdo4N~md4sOg)B>I(>Yc? 
zww8>3lY;1GP>VZ<}=imEm=+jl;?>06A#mmZ6?CU4XOnzrH@TO?&wh)B2 ziS!0q4gjx_rx0ANrE?r^b454&6(R-BlqfNN>ml4O_PZdJd1;Z2jkX^)QwzgiJPtq+ z#vSjasrNA+y1IRwl>PQ;mPQMBmX&W^w`xtL63kZ>PDi+MOHV~8#WM=lCsBk#z%Pzg zJ||n=9f|b+*tL0QsK``*EX)8?iex1lqgwznJ@YiXQuCpH!(88WhqNaJ6LO(S$U{;( zZ;nP4tJ>?O?oa%f=M<8|)m^%tS=sUkC%3qK{&_<}ZU(=clz)Nqo{OjNL>P*;e9FaU z$)jJOR(Z;>{ScNz(8|3VOlxJz#R* zmtQaZ*US$eIHoiqs3zJ2mwkSb{5F(9r<5GD99eL z*|IP8q7TFHCP^gmuJ$C7oaH)NxJlK?8DTNAl+Y4CeK#t#f&n{ z*0yEuK#Jp;67aRNWtmJ#ISABSc@F*8Q${9?jcnZ92I~wdp)yh_mJkGXv6d`Mdy`!e z(Dnr-NP9EQInXFqX!=yz_sq2yFU~{1D*8LB9sm5qTQsSVF^mSEgM=ADs?YFT~_r9YBb9OYw9kRJlqd%AX3De6?15r>RJ4n<&1a}0bXnvpb7mnwpbq0TD1h0AlUWFw2 z(%87jY^3xS!nh=&ul35BZ<9bh!^W*JKS7ULW+DRJA1fXao7@QvA)k+NT9 z0mR3`X>f?i{X( zK9bgFVlPRM%!zI>Sx<_^R9Rw)cG1Cpd&$PBr`$agTOBKfP4sdNUB%9=PDSV%FY7VD z=fm<*SopK1mxXP`^Dsdqw?=hqOhPB7Xy=m;B7!Z@o8^QX2f_yTO;6q0ry+v@O#E>R zns5>kFiIY^%_ISQ7fU;m_ctl!gbQSi#~|*!_jfk^9$x^p5753tXAPiBK%H@PkpUOw zy(`{~wp%D*3nF#m@1)t0sxy6E2ujpdIg`?IFYt2!|MsKtljsU7DnE1wde*QIY^gLs zaBXMF=n0xSgw~5;`(|}>`XH%dHo-)}?}Jy+lFZ4PL8Sm`wks`+FRM~?)w1vK(8qtg zDJV&~+SKa<`VXka2!Dyx{Kl0?;--K%<;Q^<@+He!*@l z6vrQYaOl6gIHdqiY2+rl|U1DYuf=7O(iau^(cAL#&~QQWrxLPdgAm4D+0BHsggRZD3${*3V!L^VAI z3_fUYnmUHS9zfw2D5C6j0pi|5EkHl62KDa@`z4)rLx~GWD6RiCx}_9@KLg*~-3SKZ zDDu%Q$cd|fR#neM5dylE@j7fx4uJ-$4|{XA*$7Jh{2GZ-*%@&#TiMx81YRySA@XyHxY zC7GjlU6IvkE#5O@6BYX_o&0|h_}t%SYaZ&Iz9K0E-(@iJO@7Kjp8acPObWZDN$Fxq={fIY ziTJ9Lw{)!45ZQJ`m_Cx5Y>G6bHqj)JM)lMu8rX3qr%F=jZZQG8MpEk`br*YYc~{|y z7R{2vAg~ww621A}$2@O7*{T7TX^@rNWcioOotkT;*BQ)qU$|k2zkbf-T5M1Zwm zV$lL9QRjIv-l4K@Nm1E+Iq4^p+8q)SABF^pR}Q9lrH$Jm=Ddy#pLp%;%y%=jCx>et z@d(r1&;l33cv(&K%xWdVERxW}`z%XnMhgZ^S=+>FS|3m1d>( zqlQnd+~R^mg{Do?KUtQLYi7hCu>N|C4#pi#+Apx!LH0$x18i-Whf*YV9L%ayk%AaM zt#)`oB|i}t{32_Rnq{RDANR?zyZiJUvj~BLHYckf9l+bc>Y)YM1Vy*#A=wFST{Ia| zDA^F9PaJVhwHwN^G%+pqd@^pXcR6COV{zzG+NbfE!%_)NHfKCT`rvp|_$an@8Z56O z<7i`9YGD7&Ej@8wU#GGWd)F-r-7w9 ze+m^u+b-=XrLhs<*XA1M>T=;lk12J+s#aRzz>yWSGbwJ|qgJ$sa=fB;FerfGjKRpI!)|F7w>aAU zCy1SNNgJg?PxtQBssdkpRG%9WMN9xj_sB5XAXvq(@fV>NrMlE_3Yf&~CG5i|)l17si6 zx;F-DPjRiOC2-wRKeJeEThqVfivMa^KYqlm?XZXDTghE|Ct6FP%cJhQxYo}-!n}zi zc7rcYrA=UJ>=t>+Oi0UtCUIYVZqki1L~VQ4?!j*l_boU|Ja+cp$S`?XKy9~O61iM6 zb~@kIIoe7M!nC((0r&<}N`&D}p(G`{#V-8FtX&tE2x0>)@`MU&YY7MR_YVp!PQ4rg zQPjRa)@=V`A@wfJSn4-mXYOIy^t1?vktzhP4ldG>YPW*$xVGYY z-wTFPv>;4F>IYrTb&+IvA1xVl@?@kg_`(%E-W=4MtV2!1K{K5Tt<#9}^%0Vc0h+t7 z*Sp?izPJat&Y|Yn;jy4AT?m2VgEp43=niV4#F;12e%^z0b6xK(pRzZ@`4*cDthd|U z%JQ6EcC)vBJ#E9!@>X~WPLkf}0DG^9wm`g!DSCL%#RRbC9_Kwlq9f3OuI%he2s3#Q zp16uu;op>-;c;wF){Z@?xYw`AJ>bQ~W(MlcKQkC_{fbjjfMp^w1AMGN7iG5{v5Uk_ zwg)zAFoa$Q#-765fN*L$HJ!M!q$GKw7Blvl`jksDB)L-G>E2X>$XVLwk&iJ;zy`tS zN;*jvL0+mOv6fQw30(=_{M1@j=6Y=d*v>e(7#4XcPj>ri40UCP1xRq~9e&;={4oD4 z@9jeqtuVZe(x^-)io1qT_>h2ZwH>bf@*WX+q&zrI*D=#i$IGwCe!2A1-Sf59>1iJu zhnx#fF{cSaNCQM6P;#TB(JmUBG+~2C1SKZZc$7#k0&0`!b7&80I$5jlrB)_th zXIG!CzIva~fKzOdW-}^Z3Eo(GK^B_#o;BA{iWB3yh75X26Y*#-u#5yV!mJE8%pzx z3_R}0+89WAM8H@nVcYvXL8pc;%FkMj#k9fpARUVUvKEceid{m9_Z>*rO7$6$6FTZq z?LElu_ssZXXDU-d9@i%|^-KV;ftG~mYak|wqxkeCu!EM-fV2fs>PiS`=7+n!s~TOG z_Y)iGIU$oytYm+7|8zkp8%s-wcBr7~z~&1Al*ZNr`u;k}CyP644Obo$9ETP{Jkb+A ze<^RlHXYybRNceW@rlH}67F`*TkdYxRZT(k+l6h!A+iwFoXw!W2eF1Ly4fU43bU;y z`R+mQviBOlTR_Y8=~b>hlQAq9pXGZDS#3n8wE7Jf0b?4VaXh9z)@Lh^Yhp#UVyZvi zLh(~L2^}9c9Ad6_W)mCYg6pkQI|2~3*2-KwH+vr68lx|XKITrfCjiWH4iCG9lAyiT zmm!4_)U5m0`^F-_xUd8}8b6)>x~7@ORG?Qmt?@?RqW6I11GBw37x|2G9H__fU~_W} z(P*Sm9+!P{dM}|cVK#|{ITE1M*5*UClOuSAi-j=}IxtEvXjv%<9AHQH3gN+x;&)t_}7H4z`m zwV^|qM1_*DG>&^i8t@4L+~+Q(7<3g=^=wiMT=erGymOkXW?o&?F(D>bzO%3n4E 
zOG30R)P~qb+6X?o?Rei4DOxcTxc)f`=8G9?=n~@}pqwlswHt}K)0ePhvVHEFV_i2O46S1e-qgHj(K&c2;K4wRwIgCi~&--W0L?|ibLwNJ4|+m zqL z?kqMsBPRB2ReGn;R9Z}qOXf>`UrQs$B^8Z>MYfU4<`PkE`Hagvd;JAZoQ^8q#dVzq zLAOAV#Z%i&Evfpoj?pMk?wp@AKbp?6xXdRb@Oc3N3nxb z(@jcXz7*xW%_?9Z!P>FUvlnN{&WN~Hcx7$8QhxoQo~Zsk#j+hI`PR+~$OrR0OFnV> zRiegix=*H*W!g=&C!8b~vKS7g{u-nZ0)r*tO7^U8U+g7IU=Zcvb{QM>)rw?);to3?2&k#4_q(5Fnq70=3{0i(NSpLCV>AynNSR*VK^Pam*_jBsBz?6_HpN< zbM_g_pWZIxEN%2(V&9!)mAxn?9+nxuaaXuy`OG z?dTX671GEf${uP#wI%y(Qs+dt%OS_slrP0Pf*LGk{6j@|1KjLs9nvj9dk>Da-jMRt zpa#A-fWAow#hsbpiV;%>pC3pOrY7Z+GSLM{QxaDw`Z37@CwBjwLYa776DEavcaiq4 z_an2{&9Td6&ksMBkADt5L5+va#KEev+g8R$jH#8tX)C6x_9jXPk={Zq_nTc(cC1M) zaHuR6&q7E(><_VCe61<7qP*<%bZ%4R(GPHH+il@LbHaXeia@zUeTXrerLFXD8l80Jga^^ek=zics7aC_X>Sr@xqni@i!?GnGeIrK3+?C^@ED8 zgQaIu^!uKezE)JbTv9SYKZS0o#d1qGl8Oq@t?m6~7xXL(5Ys(v83}K$R%JX;B@MeB zXr-wkFX+{2X|6Fo3yWr1sz}fNstWugUDlTXqOl7lj&A)tNK;306J%}#Y3C6}z|30E zTj>07vgDbrEeH$hhJHe5fZml=6Cf(lXl)_AvpJl5R`u-xltB?LCY$2tWU&5+$awZwelw4%5_YK=H6BW zmU-7+CV@5MNxbbtric5fPyFNMMaPXlMF*vrT#!4*po_K+vSwcO#;?&h#z`LqTA-v8 z7+#t-{vO`uRwK<35>9+2Y-%EzS3+nk2)O(aTJ^cx%?GY>IekxQl%KPq+ zIb)h~Ph+Bh2L)XHsmaF9695 zD+2R{jbDL{M>TIj1U}&PpX7L1&h~LkOq>ageA{PJ^Rj06eAtaN?T(Jtsl4`gdah!_ zQ}?tD%zCo`h#R?=auK*Ox3VNP5XiWM!pf)Sms5Q_dV}LU64}E(ysW7iD4_MLI(F6t zyFF}v_{#18*ZMUR2#OwIMq?EwpW+hq(gGF>j2DNS)B8n#;2-qfO#QHee{kIa+gBF8 zm*<~^UTlvZKEdwTnY4Gxf|x3QdHNRF5Z$7J_D)LNg4FCQCA_4m!@Z{IhVln+*=;(= z&4+pBuD+KFEEOs({KEDm$>D0X8XKUo9wHkc6-Yyc$djZN zLI91otpH(cgP#o1ntyC@)zM}ksDXz$J51rZ;m(Yp)W~GkkG|<^`R8(zdas&Mc8w4V z@FT4+VQpe`q!th?#xW8obCWmIy#3`tVZL$bo!%RoY3Tzj0rx)-dZf+0sY|BWo|!Wl zKc#`TT(@SPjaj@8^g=f2Gg%GQj}j*3WbaY(*W;e;u6`nkf0^gf;VV1160DoEFVXk@ zquZDXocFy5WY3W?mAWNr-{0{;{V(^B|3y4`JEVzcK`CxX7y{Wo|A;3W8XwfwR8==o z&^mY+;K`cWDhh@ghS~~->gvj>TH2=ST1H0O8>uyZ;>kbp^uF(0E1J=B8k=qgjC`woOK`G33LrJlMy^ScQpsmXD@XCBsk-V{deL%g%g(0u z3Z;2mdW(4}6{hW+#~D%P2sIk#9q>9?L|@0P*@LwGeT9PX9*dV|CPNBKtcO_dm)-=+ zMeo*YlOXWHyN7&TBnF&71Wlf#cAS)e@3MbR;JjAcs(2t+R0px_=fBrKY4>}6GncU) zt19DluyJN$GKrlmN6~B`wUEt7wa4(D(Iq$SmA*ONh*VvASkbv>ImNbRLd5l9gIo^R zg-4jIJ+PStC~2<~O#v84>}dQC$0bsG$bnuC&#K;DYCw;L_-%~E+bwf7W0Nq9Fz@VW zB1~Z0^U&`3CS`nQKG)bc(fzZE;vP=^Mz4-{3yA9|a~4HI-Z7WGcd}|vx&`NI;v{jM zCglOPNErhtpPhZZlIGmh)2FE+*ttv;izI>A1nhHacKc*sjvU^I3fOg!_#z z?+PHx*LEcCaeIYT?^o^OareK%AXy3tXEZ?qGUJXVP^37@_PX@}a(4qY2jl6d27iw4 zs3}lP=X?9oy~cFGi(AOHmi<7JyO)!szQMHM)hAup|8ZSF_Zu1E9|>{Kh{l;91^zY1 zIvc`Ri_~KA;LX7b20iy{@Mg0mAgIhGnhp55llYKDOZ?L{BZBneWyg*(QpN(SZTmC% z&n9mEqUY~kExNC4(9CZpc#^sWSak*iT{%SZz)Qf@g9y4=COlpJz=#-2I5(f^%O`5iN?2}R6 z{*q^Pb>WKAfeqe;6Ws@%t9-ma6{m8@yqx%S_*8d43$+`QJ3^evclz#)}wa9!#(FwdAh1YRzBev?6 zkbHwuOp{cKAaAretkRRMdGm=l4RBAbpfv$Y5@d6bSrCXJGC?_Xp#H{2Vxu862FX`& zY_VqCKU0c!IzKFwdw&<1T*Q#BdL*2A1-=B_=rJLEvClUa%}P=4p$ttjPNa!W7q$VY z-H5tx!Xdw<<)zX*RTt4ib0Ntt(mTJTFs+?F^11>-vjw|sTmd4`s+`) zzdoP^pOLLRwou&@hxAlbt z?UA3mbo1~-f$SK}Ovcy^uo2n|zqZt?=3c1hX za77e;h8YJ^JGGL05@(wf5_JB1;~1=p=ZeDD7hPb(kp?M>wzGsl%p?n&&n3v?k{d`P z0R_~Tjrs=$$YK@+Rw`9rmZQE;5XjYa@)h|lvX3vrfCXj-v@b~(&?rWMo!4Zx?;1*a zP^K2o)!}zkY3J8*r^QxT4dLk314=tQZ+?#bkZ+?28*GUFQU@3>0FUFfnmEdW0(O`d zI7(LOdY~Wl22H`I-~PMPo{q9w$&Ol4wxvCr*33lkjm$N)_b45FQ42Z>TFD%<{ITY} zo-2^s;c}4+r~Qwo)KrX{?9*SBpVG^;5*`v3v^I-D^=IRapre_yeH7u7g|vYtC?3~A z2swsFCCn9P^RgCb38?vqnRN8#$HHHeRa-8;H@(KR)9brJUF|$D?TMy?x*H_TlY>#+ zG-lEi`Jh)SDYG5T7NAtr@P6@jcW7Y2!lhP4shV4t`@46fy;r$K=VA2GP(F&%f&@A) za+x-Yg~Dd;Y-$0F_XtVrarPdw!g<>6m&1O;3t>H_N|uQBU1o&}wfE8$Z+Gl0b3bcv zd6z(I1lk;q_fYC87epQbxm+Rj&1}S$yEj&o!f|5~{b@#LCdPa;-%ozuZ*^EeEbQ9o z3u5_A%*+S$xGye%vb%vuA#LLu^3-JirhyL&!y{t*vv?f1pS->?BqJLdW$|1o@F0ym 
z0~`x)T7d0=vwb|E0RX)LIEoXd^~0+fQ@nw&BZD6|6%-+^%wD^zA>R;O-LL-rE;>;o zVR`wENEh^8(I;aUN6_*5QE&A*s2Te7NHb8@RgjYj#W)~uL0>;*sYIhUn0T*hzRbb} zn_~JY)25L+c{J9!Us-bJK1c-G4f{N1u@B!v)S^B>ijtKnqSa)j7^FatXV-N4-jmFmYKqUQTA z0?zzN*i0&HHC-tJO|+?kQ4e;(7tP52NZ@JsD_M|YPf{qLA3-VLue`{2jL?GW5L><6 znwLZBI?#7P05D-1D==&@?!j|W__$`W+qpz2%mi9Ot~YF#df3F2=|)?9Ja7#E9^ScX zd7$5*BHt^gQg;{Rvj(j(h>oTO#|JQXE=dwEf=240Y&(wrY^l>pwta!84opH7+`?V@ z!&suy$e4LvQ_f3?-)NwRR>M2XIZ$t*m9V6yLx`qjT~XauA*S4?J?cel`n~?Ed&!k` zY)`d4#CN91Q&uoD7%1LvW62m_OuS1_S_&sKl8$e5HbDr%>-D|U$+>w}Prih#Riw?@ z4D0o36u)*jlELV3Wn! zqQ!Unmvp<9x|a6M3j?%fDBiY@9Zn);nvg6BrsIb!3vC)C`m0?|K8)K1J%Y@qqjJG*|WHv;^QWTXIs; zgWI$rgg=D+I~nLH1<(f?Fk<@Zl%PSNAagL!LSjA;PIukROSBR|oa+HF%hK(fB(l-# zy=Gba5>8p}M8904OFq3((+sAh8|_V^14u2c_<)t1IP=J{a;Zk|-3B!aS#HPN-(nAl zPcf_>n2v zyKFrCVc3igC>U(HufsQqDeh7}U~Onn6Of*oc^~*KWvj`KjuD!n91=;M-L(OHRM44Q zD$)1=rOv*wN*uLlI~K*#d#)Ib-hI zgmmo_DXg`J#Z9{Lc^B>OV`R>ZeXE6J(Au~-VqXO8egH`hn?wO(!psiJbxBVWsJC|{ ziN{A#+(@a$I?KQl)qd|%eR`-b9y3AZjWJ|(bd!r2G||1dF?oNB(}fDD$Z-BwDF`xl?WAobq!t)AVAReOmjq8>aD&d1%gLQ zF6k~IS7OLt|6S1%zk_VgsH5iW3zxH<3$88u4%-SQR)Wd55=mMMxQCX_UhJ4ziG@`v z-lK_wm;l8MW0(rpu5qRHDU-bd@)uZzKi^uy1;rJY;a`M5ZH8uIYWgVLaN-`)wmN<1 zoh_s%F*CsqyX7K^M_ro})U!>#_QV9F1L& zd;)|OplE}t^o$~g5%w^!fOwPGRE@kba9FvW^&1j2M^QSUUaoaHO+ z5bafF3sESmC_M0qcgEejcWUa@zxqBlKOb`FsbM(Ld+OXJpVf*hbjG|cn%Q_q%osyE zOoYZu;Ub;q#NdR>V})qBv)$)~6rY?|ycOo=p*W<$ZqaaXSyD~D(_nvt1iDIlm;6Bp zHU1GswMhlM*#@k>&||f8*mxQb>fP15HBm(*7p0wD0l^pQqw~Up!*Om%1wV(@;f~ko zn#Uwmden}z7fZP-f=ze8Rf1C2Vq74hZFUP1K@$U?nd-Mv&OFGTZB{ajXc$L2{#*!2~DZF78cy;|lLzLhU&zCd2m%&{y#;0VL_^7<5=69I;86 zJOdWG%!x{zzWwRpVpd8vi98u$tXW+vlO^ZlRcHiakvbij)w(AjKwod&uFJ9+`WgQ4 zYB_~3mB`VLlJJ6a2zLfL!x3qRw5xjJEM-o;7+97XT6~*Z!uG-jGNz0F}66Ruew;5I0(qxpjKX7a>%X-s;k z=$mxz3tqs9#j?V6rTyK{7&gjcDUU}0i5h$d&_X*&@>YbV%DK!}12!**)JNC9y{k?i z%6s|#Z0rJUeC`-D0_>+-YZ95jF@v8bab zth%r@!}Yw*Bgo)FFCDMSK?Q}vnB%k--tQ+Jj=VZ=F0Jc^)NG3Qdd{my3GadX7PolFe<29+yn$m;yGr}qVj?5EEC-8ICY zY}-X+4V}vb@Hg@xX^bF0hl$g(o5RMSRO$(*bgI9VJ*Zi8#Cp$J)c3nRi)E~>u1b5S zx_q7WaEe}TE#JiE8~~!as%uw+`&bCh%_!D#TgL-;;O$+!P@J!KrEq~w;fvv9DD1swqd1^Nu>E#R;NirZhC@ComN-fq`q#Pe<3?V>|63(Z` z1_Z}VSg;>%qV>G@SGQnS^Yz|&$#}Q2Q@31@n0qQuM@59d_6%MDDcuh^s>pJoua?wQ znf5VW6-VS}+$AP*NH>VTiP(aebxnf~dB1#|4dD5o@F5goEWQiqM?rlNB%o&Ac!Co> zt`r?Y;~Wg?(Ip?!$C;$I5O)Ee-5pm5f=eZ3c`>~i1aLEe1rZPep<~A3WafgVOJhk_VMMP(?~Jg!y*{b zJ+1;bdH;7~ApN!b8tDHT68#^xZCA`i*Pj+4Q+(VL2XrqKtI!_)@DF{CffgAzEtk0qUErkw} z1LDSmwC%h1w{LDOK@M7b1q1~IdIek*S5s7m95gbw+zv9oef+qV{&Bf$P&9rl3G#f$ zd<5&gMUoL}3rDYRZfp#VVS{i& zI6&zRy1E5kG_p1|NoKy@Ba{of0Pa~ps=0SNkh&jo4sRXyEj462`g=|-yi$? 
z7i{k51Kq$a3OGaDE?y1-p9)ThWJu7(?dyBsdiP~eC~#e}eeL`)_ z#uz;F1zhiP^Kx|u*CcR#;M^b5J^yh1>wCc?5YdyC)~8IwZLG!B)l?Kz6;zd#wZsn_ zn~Td@tI2IY@Si+_&Lg(J4`Li}G4!&xr&o};A*i+U;^zKtiU-71l~n*Fz1;@Cyz}FZ zU+Q-K^k34D!4QZJ%huLb&tKAPz;{=VLLltie@T-G1yup^d1Zak^|I?P`GJ4w?`H&h zsHy(=n~vub1j4$qwM9&XKsa?E5aO$?Ey}5_EgHy=&JhBc#BMc1xLF|g=^zYrdqL28 zItFgKt#$|$+-9Wvar~As9X-PiMkZz!R^Xzd9a6muLQluQK)-{5k#So%>3+h6|F>`< z(~pCViHVJqjg5_yhZ7t;0zBJ?fQZ14L*&vIv`98+z=RKQ{`TA1Wd>2L7@2qHg9H`EO}Q>nG9D!T)NmD zYx*Py;pAB66Gxk4ft#NBBjH|Rk#}MtwodyWe8|e{zkT8z7+m#4ggpDxGa%2dWCivb zdFA_Gx1mcUbVl2=oIa7}aS*~;G-{WQ1uN=Z{%I_gVln>YxGavI@?+>{x>vJ&-vx*7 z@3F7p56OI^8@1bZKNQ+Jy3q8f)NH4<>9f{rCrs%RA@r3GwAaIgI*;!Eihh;rU0{Ev zvtq*ZDKql2PhXlqwY{VWn?KVTj+H>x_$^4--5{U7zx*~x1^x3L0{aC-`;kzi)JLhk@je5o>IoVaeQof$p zrm-4?nb*mD6{;k!<~gEW&Lw=kAjaGH1UnijBh=M zwCznzIpvez%^IkhDF%`lIzn(u=g!yGB$UaxuC_l!|@hPYW@K2f5`E`b+sc~MusJD zbLO;exM@(GDDE*(BRiPBKY^Wt>`FJap#Cz*W8__bT^H7FeAjzbWh6 zGf5xRmdM4$9dILi@8Rb7Y}P|z4(xZ z=ic|-N}*eyYU%vqq2JOLB;?I=WJMAhp7Ygu>e%gxxSAPB`nG`zT5xn~=J(#Yn%(Z= z%#XGpx5|Yc1g!LIy2T%OfnD$0dbcSf1J&4PJ`)=%6YKBm%4Cr9z}hQ^{sENaSiO&N zpSH|!gh)@~T`7b=EB*Z}W$)`}`{_-aFL0Vj?|F2OQRjx>g{%F~?|wb9qhPL7v+y)G z<&E~@Az6Cfk=^~QxO|V#A*EWu5aj_bnJ;qfYe#g}zZ$TFhkL_rp1fJ;5h4Ft_4?I# zRPbtJTA)3=Z)bCH=i(md+H}+7+cO?@XddsYhi{udneHIJ8hQFaVEyFR-ivPd>EZq6 zg~g{dCtlFp>a1N>0u%vecTj@r(-0gdwD zCsRCb&kb+Pz45pZ`%IIC!n;T?%m2`XrX>wWNKyH}D zg>&Wwdk!ADc*yGLH-g+-&Iec1+4RP1-2=5YjEI}o`I#oJub}eEdtf&l6BO=o+z3Z> zT&1```fa8ZhR!hVnac8Yv&4%l5%?nFcxurD1i{1pF=VUn0o3dA?F~G5S?ziU7;$-mzDO3num9JMh%zt1*I)!d9t;~lZTzZQRH|< z;z!~P6^8+fdo%Ok* z793EgAj)5mdvb#u1#85IBE(}QhtE}X&8Fg{61r|D?qNwjE$8&z8_!FQ>^%{kce%i1 zwWhjvV5o4gT6AU#(_iSj*zDQu-|ByU;7)txZ2yrnl}PS3p|tw1vE6*hM#zjNrLe~cYMRkzFytUrkJtXoKCSe*>qcA|H@LOrZ_bZe~CnyUh$|M-X* zPd_#(V%P(>-`k!Q zYw|u|T7BX=46(jHEgnQ$iD4X%p26G_9bqT9)9O`ljMSk|xb=lSh4HBJcS88F_1!2lj2!4Z+P?)A8lbUC0(VNZ~PJ4!}VF{ zEZJOc+u8i~vP4LK_P5|S&TlW=*}10`4@?&;NJ~muPD+IUDYdfPUo`TXCLXk&<~UpK zz`xoe`>H-89sVXsy;d-`iKu}R;jo3O*!Es5G?PeMvu*V=Uv^+iKdS(m5wVcUt^BS` zk6yPox*%P=kILv4e|EppEWoi+?$Xv)*!PNrYi)g1&aBSym5ff-b9X|x*S9ZcCB2SO zHc>65`!Rl~y0`x<^2VcLK|16E>Wn*mYf7);p6_8tCy19S8SO?inM;g=bOf}%K7K4(ndRLs+@aU{QQ0np3#DHSjlY~>NqxSq?3lOJlr9$H+9~m!j+C@I ztv~(pgVe(T6R6(-4Kn^&+kIpGu%iE|eWcKBcKFh-&bnBAU;1}zt_h}f!{7eerS^VvbJ6Nl% zZihmb*F0`*(5m%Ieyg^8bc<0L!kXlx>dYnQT$B}~Kh9VsXUIkc-5(VCeo2)ac=^WU z)@C1-z#6{eBV_f7>B4o zU=z<}i-%i!yFHiqt>>tTWhc6$dE22j`iSc@ug#t@!*&~zIH={xpkIl(m1)JxqWQHlkJbBqM`<=6Zf8Ky}`H7sc;0fF7J z)vi|lgffpK4`XyS^32PQ3_boG!Ew++v4SQQ4RU*H)^9SPV?x(FI#s_&X9a&`uf-A7-5oQG@- zD*-&)kB}iPUd21iB@jtye)=spR@(;;hxNz&Y)5?2-ZYB6mYBh|jl9)RD34bd{Oz6S z`2smkx{H#jZy9J=$%&MSS=bCTf>SX6Z1zyK{a7up7R?>iu?A-K#2Gug;kC zX})d@eg=dO^Ke=H=84F^JfX9Ao{_stzgfD41r?4P>&7~cdLHlz5MBClOne>~=c{{Kpvz%-e zbt^_aMvp)JfT-`uzsC3kpUp@Am^@UhG%gOitP443bY6W4MC()qWAknQ*U{ zK8koeN^Mg5Xl#rZa*?U-+uj|`<9taFn_)$3fA*N&^YKI*i^s(uhlRl$(I34QeA1{y zo{Cc|&fnwT-T404ZKUy5fM|u#TGj>Q$9l$M)M*OC!s7M9noA*yW9H*&t#y#p^^5&F zBZ{1K8J2IuPRDS3I)3w3-Y%5Yd;d+#hNrrXU!rQE4#|kv940`v;P|kivgrD%a0|H? 
zSb(uRq0B_-Ve`P{ z3I4-r>X)AKGx2EiQ+h#t>d=n28(tonvg6WP-RJO$?9VGC>;j`k!)}Cn&RvRH2z{HX zO&0&8>GudaffQ-6vuLhwxgE-2Xq|?@KJ;HA!w$dq0*gBKm@(Ij!To{{yFF*)otHNmk?D4(_?BK)PH-Q-TRakHZQgzGYw~9gd!^ zali4^Y6m6)s|e9^@G?3)amwVA;%{x*Z;jwx)9g3xlb^lLUSo;6EZTc*_KJRV={5xW zqW4iP^z-GtDp9jqC#RbT?h#Cdy2j}U!Ic~Grow}7gG@k&+!rL<@|2Yp5+|c&dcORF zO4N_3L-Dc;&L0o0@800)R^X|)E1}h`zy0cnj+#*5Hd&%=^+CpY`dg#7tg+&SXVoFM zzL#Pgy%d`s(~Hr>47MLd7w%PDdI z3rY2ABA*Va-Zv&`YM+uLw;t(Cp%d^9*m5$7NV|MB@=o4?_pts+zu>HiJL|UjHj&}o zZ#mjV1n@47LnV}R2UKFQBLkfzEos?p0tK3fNNY>c;r zo2R%t(kk&f%;jlA-GOM((TFM$Rtv*hNYAf+LWb!ywk}VYGfuxB-F0es8Dnes^6S_c z!H4e?EUj$k2kn9r@Emy`*LRL6bR$05CJivDRYf#YRXhT5`!AwYpWr5Z2h$~41LF@g ziqdY$opZG>?&m+gT~XFp+ekE+qtTiF0l9z`Z%XQzsVx2^uJbkW`0}=rM*e|*4wJ>z zqNhYEm+mN|8e6B_+Ue8y((Iy>MSTh>kIs-IDi~qioOX9NY316FHj9`{2|ei-e`W=S z@b7tg_UuIKo~d}}Vg#WYFI>E^5-0s4Gw6d$*^lW%=B3n&F2$^lSYqF_5OAslrine z-=Gyl8@%s$Z_up1B;!F$5WUXGD*^4|>D;6tS-d&x^k~V^kz=h%s{^*LCPZgM!dwsU z%-fpQ;a~40MxB5}^nKGRUkEkR9>0{nxr6yF5WVexo>e6`SeX8?Xco|E2DaQ7oBnku`(cDC`?IAxsh}ChUmj8?S`7k zW^K-|?^C<u1*sPgl33oLqK#tr_gXBuRTVsBTYv{aQqbM}VIK9r;rVOgb-2Be_m+yODM;hgNZXnLXPZ@T^a7h>A)_)uspgMN)}o&#^YZ^K&)a z`Zpv0wNc)kImKO;Vj(1cro3n>bWo#DpEs&p;exhvnWo@s(_r$Pz}Zu#iRXo`GQC8` z1o_MlNN9UqLdJ__1xfg1Tb#~SsOx1(UApR)-qHO2#{JhxA1|gUpmo0aG`v;sO`x0c zec~xKm+hr(y%_K6s4U3PoFwznms)Fg_?BwI1&chn*Lha+pSS!VJVM(6&BGHEXGsiB z8J&9d$^~uv2OuB-hN5E1!;nmv`{~;HsTy1T0aGmu7ss~tK3%%&emJG-D`lj8E2*C+ zpd1FG09=ovJ)t2<5@jvELLbu@^}GCsVfE*4jZ+05TspxwWapx@K~db@zo(?V1yTF$ zMlSk-dx6n}P45MWs}`N+D3e?seEC^n4Bs2WtoQ6lF*L61^@-zIDP!pyX5Sy{>G6Ns zd*1wE!e7@!a8e)jlJwPf{X9)(tSOZ}Z#O>fL)lq(8n*c}#1H#gbW`7p4}On768<>CHv$r9%=5>DCV0WmV$>kT#@lyP6*mhaZqA za6VGA_!Uig!@LF;@M)MC>zi8A+_Wb(^=Jj!hWZ#QZ3ugvIr14c(LaxR6`j6r{~lEV z#i_ps55I!3GB0qjiFz+k`7$CZAkarHwT8!pMzG5+vUp%;OP;Ywz${38gmmxQ56DI> zICA(fhTNH+d7qFuMZ&QK$m=LbDanF2VQI#Rmw3VKpXDhB$pvT`a0s`7fE zbWB{l%4-x~}1kHFm#DllYsFUH%da0#5=SOauNMJ4$=>ub=(t7ymvhQlJPlS|Ns!ghAShKV`s0gj}Jm ztQQ(l^hL_=xc!HS8{c+TzPZPp5n^rjypnw`T$YL>-Fu9D2jhpii^5q3%)mMp>$lq{ z3&+pn6%}AJRVmk2-}!hI9$R|7^Kn!&fY0NE4)@iYJ_lgW8G8uqV*kU)iw~dz89L+S zY)z;aUYgiuUDX}Hf$%>(P$V*R)jTk|?h_e&+E#TS;_XDN{S`wea+^M&)FgIe0FOdhe^f)lTG&=UUCRZO5+ zzmy`H2Y|tMP_OEJG#2Xu1t>B*b`(mU;>a0)%T?WgJ|Dg;riMnwxR zQ?KU5kDU5yR4tat-!n}WmbsMGj%rub)Oh+a3IQ;zFoV*EUgXK~RuO|@A1E%*3&}Gc zWhaxM#GqUoT&}lru$^b7ttKjBmU+gQG{$q31_261%m7WlrNMHd^Vw;;n#`{Uyr!PE zD+*$LxhLK~^L>4V{~^r@yCF=&;#LU`L*OMp()1x3A`aD*5~7$(Q>{0Q$4l-Ib?r~Q z@B9s$t{aDOw_}u)kj2{MJL0)J^vWDqFdM<%3jj{kk5U01;=|Ft#Zrn1#kL?q@0NNFcG{=CuN+VFSLoK3Jvc#5%rmCqMrl zEx`qoJ#_sm`-MaH*IrT+DGknsmslnD{=yR*h@u+}NskXkjv`s`7M%j{mGo3`ocX;H zpX^uBC6!M--%GFUl!Yd}vk{t;5HE5+VYgaEVzmdg0(3_}DyU8b$RVTffH@Ob?REl} zE$)Jxv#rKWJE9_T3{}^2J0N)ZH<+hiur5BXDww0|eLB;z$4!}2*uGQEnT`>%cOniN z)=D6#BG@KKm!stXz4hEd^MyrRZF`>WlcHgqOxMVAf#a^FCvD4o|LW?>Oi{*2R)WdU zZ6pwE{Lwor(Q2V6M!3^g&NO3(trCYKG5(VR$87FQ=Yf83ps(O@Z1)%-N_YZJ zhZ!})Fh~7iUQ*nM7uQV*<7^$nwCgqMnf4ql>UM{<>RtNr`&V5F(|gPyIe7vInl>aq zr)Vui(cv58>PPOWDCfDE6aj4J(_18Q*NWxy(aP8>(k>3dtb7lW3=fh#KH1}_$O->| zDfwrdI;eiPC^;y?u5GChWheuA=zHHS(kY_jQ4$f@mTQZhj_0P07p@bY1XD_9ToiBc z%C>~y!;Y>G-z>nD5xM}WDiRBTCTuX6A?cU>ObwfS7_DcXWZXzH^?6MRanGP? 
zACMh*Qwkh-9U%l9}v&nACP5%sSwyP1YO$#-J4om*I`8?;n8%f=wvDD z{CqpVo$Yqnuy=NLb!XU$Zb`8u-w_{t6(I=-AbJA5gfF=5d)l&qCxE^fVVkn5gdOQ#Psw5a>91Ga77=qX9djG;-8- zvED9STNJT~g?2HD>Vl7noBm{Tz%#wDS@Q?xjts_IOO5Bp+_xWgzV-JvTpd{_f6D<~ zjoPwjG3nCheD{`wGS{q|(`Xz7&pdcr_cFPqqB@`nF30dM z_fsnnaZu(DU-u z7rBZgxzZBQ>B5F%#H1sc9T1(pHUPXW6R2@k7SLus>2(<7c{p(4+XqfN*#+j>S{~Qk zlxhjL^Ux`k4htRnT%?hz5umeUR_gc3l^NejUB)|o3p1DP&E#@ zNDJl9MR#9`hSNcZen2`G0Wl8}2VDoKmjsF1WdGfcThTeBv-NAG&a7l5%D*bUm|kcRymcW*PtpTWl4r1kBiV zEuM()7#8gqTc^D*ng5KNL<-yVJg*TYD!`uchU!@|4 z&rzmOH4o#{WugQZ#9ob)3cRQMfj|q9pF~-)piPfve^k0P-<3r{>E4cV{oTqbXw-= z0!hmbFUq<=VMkd~^T`{)`11B~&s*Ca26*lI<>ZKpdIyUHf3)7HOz4b^Yc1z0bwE%< zzOaolo;_n;WnkH8i(J*YNHya#JDudG=yT>fjHQ=Gy<)7LJCC5@?zvcaGAcSAT(!WEW|kEqsYS8P^wbhvn8kB&bhTl zs`@!k@tH_Fvfm(<*eac8PNwjq+U+sE7D*J%_u8~Tz_NLADFV|)zdtpdH#zmZY~AN- zNK?I-hWIPfnNzkg9S*K12Lw)~;sm$~@aZHD4MHmL8CP`?NXNkq-)r(Lmz$xM(+aAt zz`b9FmgICdsw;gT3R7PF&@T0?9z&E~7ZCd`Vuah`j6eA~;6W5Y4nk=^r(u_&omET- zZ)1H-37}Sv)=|^~jv@S~d)4bsdfF0h0_vSzcENByMwKlUE<=YBC-?LK#%v7{zr_YP z1I!&ZWezLv<1af{;V5SHup}tYHP{V)4yaPJ_O&+t6PJ9WcY+?t{Z|> z1brMBPT})=XPN5p!G<`x_zDu&k`MgE$cp{-sgh_H+v%#(@iN|vUOu0$7}1O>%$bPx z$XLF3Y@`{2K7v{*feL6kJ-wi*R)j!e4RA0*Bv4B1rihv;Q?SHfdrVD(nYN9Wz>^)f z2Itb{r)+&43kof!jbB{i!4B{%`Tkx&S~#kXu{Sm(nH)UuerYUw&7u|&V!x;A$Y(Il zuFL!J(#f`?!p@hoyg1f#(AAjDoM(vq^Y{X=N3u9!R*tX>Cuq&Bhn4z(hNo+N!Gdn& zy_yTMh>V#YoC*P*G*OE(0-J^7o<^}fMZk!z@3&Fan^$Xh zha#I_Q7%YdR%z%amNzxr&T^UiQ<{Lk*9iRm|Ns3ow|~Hw|06N)f45=$-B|O#d!GO5 z_&*~Q`mc`tGw=V>Y$W|d2b|wKyFXKw|I9vr5775Qsza^VA&{Tz-vab%N{Vu7^12FA zaw@v2Qi@6-&tFYVK~2g)K~Gj*Pu@Tm6m|Rr=zrx3>-+`K{{rZL0rbBB`d{8||FCEK z&w8u>IR#{EH2E3KtLh%AhO5262wT>pwia(VzE)Rv%mLw`*AB=EewoU;+q;iu5}Vwr5Q&uZb22y1J9Nk` zGtut%9XklRn9x<{ZzTW;BL??k8n)X<;xYAAozM@{Hc6C?F3_7lWZ22}z4Vr7l9{H{ zm(iEIxvRT+&qG{4`=)k=pH+Lz{OU{TF;Qu^6AUt99`_leJ`**OQ!pBsw@*61BZ&)i z-i9wvCvGcM_&}~}Wkt4QYPYtx zB=AX$y2MU`$|15q#EZo!FzCiaCARkN3zKukmrTy)XzXx_9YJVsHZ|2E<3|nw^&VC#_!6MRREIq2}BQM&Q1Zdt1WQUr7+IUsg5|9LmvX+I+ssVYlWK0J)+&N1>}@}U z<^)LfqRYL7hwG$1OGbEiC=aFI1?fbd)a%D3;*2LB^2FTuqR`)4f;5S`3PKb^$YCfA z2-C{2BHsy0;iglrPc_ScJ`f1Qoz=OEiJPg@sn%iR(r&qxC-V(_!jmGBQfnUS+ks{z zSN^kS)@MZI5u6}<$|1ht+G+qrs|=b4w3^K z&lbMEvC9e#>ah#TRDZE0Q73ukELGKzMf{tQtumq}s@=qDMi69cfaCBX7 zVezfP`K-{BU#1WF#{sW_N8j>}HY(SoUE=O=uc?MD`Zq2u(j)RM$Lj-Fy=-sYTS%9C z{VH#j{&VKkHRW8PaBH`yz$SWCtPJ#4RNGYAJ z?T03w3*Ay%4d%FOm1DW1d)rb{GHPhD-)m~|6$(u9%SUJjoH^>J&?~8WgqZxc#FF(w ziTv}g8*qF%I73s0*X{EJah6cZq^ya;tB*@A%gojp@# zcKA0g$L?0N9|ws@+0PBhNJNK64+SrjZ=fKpG(h&Y1*B>FmHh zUf9W-eo11G^21u%Q>ARv*REp5x#<^^>R6pZvsTVnDs<4 zl5|iNU##FxKsEtQ?F?z2g(!*C!Zu`925FI5w&bT)rp7%X(SwDU4=l}sAJAh17!dhG^7RH@nXY&<{8mvrlt&`*ta}T+ztryeX#Lv zX5p7DEi3(s^UAS&cbIywE5lr>%HQ&iVqo8svcaV|j!$Tuu}lgQgv?_Rl(7Zcag2Yu zzz2k+1xRGOPG;RO&9ZMbKDTybs8bf9j#qHf3f=z>m~e0nrWhl3Jn5a~;3u#<>itlsa~ z1|+7F+{Zz{bJ(pzTXW)NT5b6iuQ#WkW9geOM3ewtRzU-rQfp%yx{oQ|R%AnCC9Zh-Ru#_AHR$7b*74JYS#--t_jy z!VCU$Nh-&GK=hs6mblugP6Dj3dvCyks^i-}p(l~2C4x1p&u$mH;UV2H!jmB9apN49 z+O=C*jps$s9){x2f;h!cQonSP{Klj$xQ52=Cb`PTc3kyCE&o3oBB(63a7k zh?LAL8Y;g*u5OWgDI?ID_>Od5k{Af+Z>I}#k1H)E9KICf!@FS0`~4KnB385%Oig_v z*W#AlG8kV+FgDBgJf*v~qL_2KwQL#*0fU-`m|{d#*TS(;B>|hxb^cq4Gsh{2BMBoX>ZB{CA@#egBNyToAz9@3;Ae+IS3f6lL98#>@q!MK?AeVq)GBgXKXM6eEJ!~}7 zw(~2M*wqe6fOmzhY>=PpnsMhhxQK(BiTu&2o6M(Uv|5w!usC#Gc-u&d%B==8Zmx@< zUh?jqx)Z!D&DtsAy74Ci#3%aBTV{L`J@ldNpDrh0JmI(Z1vOs-u3jqz>e0x)jL_>R z2F0>JG!kw7mEF&updPZ3upCewhD4s?Nc32NGkL27e(*ZHSGPNOvOqNLl80vkjWIjx zg_8r}OlthQP9c93@%gi#|20nj^9BJJ{t_XLAjyNzY68MZ2Xr|yLtPo$V?1t3toC?1 z&!DHZ_3=@*<-Exm+o;)I%^pP>2`~Ajts||HSrmFbB5VW7Dy1z7V7tP3No9`eQ_pEv zGm0Nf3YwB-E*0{XI$ho3pU!g)1E~&nf#={MDabFNH_^x>n#8w_b2G+E^(HlI;hC>g 
zH#-5L$;XObjgg4XhEt{{Qn_VBm3*yYGY`J#Btuy)@+vjNFb)lZ=u`k#S@GMb@CwaF zPOEOs(!q<2!4dp^&{^p~UmCp0gIz!OqsQ3gIp#!_8EqfH(z5 z6cMSP zpDG8z&{be2@-+}V8ze{psY@2T03je1eY_b8rp=zBumJgy#SOU;6@4Iw5$YLdltI(z za^c>oQjZMowFn0=xBcD~fR=cvv8N(@#XNTfPU};6qz(wJ8hILx>5`yrX(;EXNpJk{ z=E6aJ)OAO*cpCTPJB#t4U3oa!0&^ZFto7l?bNEwASTtwsa#V26PM?w0f!_$O(J(XfcNg~{ocX^z#_t0X- zXqzUV>Bs?5`vT+##4?(pTbl>3YbG97&*yC5_o!Q#r5B3Mw`us@gHYnMM-V zT07{%o1S!QEmm75iWbN%M}!;Zz55siFY_W`b>8F=XmUjwn+p+>k2vKXE^Qk^0^#?d zxbrARgz^aGc=8h;bd5?{GgsiU2aYBK=+ps%OA#fCx%VZA0`JO$D-~b$zF@c`sQc>L zlQv6--NWY~S{U{S)ed_1enX7H9_>*PdN=35Ie79kh=;+s5H_8+(3{hEyS8lP=}CXZ z86_B-*M0k0FT9R#4$Qa71AVhGn#eJ#GE~fOr&ft-Z-ZCg1gMr}W;K=x=qf~(9zLKe zL7T_mgr!bEXu$K~w`39f(nqMLsmA$0x z5ov4PyvPz)xq!2)vskk_lWX@SXv!somiDVYLznzBh?0N?K_6$=IdZ~_@CM%F$3-X^ z-D)=V#?2c4@#t=B}F-#&s_g`EZs5m8{f7 zWO!s4SyyMZ#PnK9-ogw7uT0&+!r4g9KyM(JTpHVnWF(ik0?#GA!d2d&l4X#>J@}7C z*?w7r~^rg<~8wFg)&N4iHea@{!br55hm2P`4rrldo2LT?M*JCI|)fHf@0}5tCp(j zzUCm02QaNKoW*S?Gsk_`FvZlF%FIORT)$Pje%KRtF&~xoe|OI`H3Cgi)in1K`#RMQkiLPJ zkqRUyGT4Z^%iME}@2N_;6_>xX$l#e_K(jx>G-;4+ki;#T#_lENbt=?&IgzKJ?|5+@ zMXOm4oXA`yx7O#w`F&8wC|olnSzOxN3Iy-BjpgrEiCdxKQ@)B?Dpqw9!an)2ghwt{ z48rbTQOS^2kz+kVw@H`q6QcP6srxuhVzKDJRAZ79`Cy6K%I^g`RyU0B&;)7GCZbYh zae!z;EuJ=MPLcQC13^cJkLoo z(7^k+wl>S+b>qaf%U;4Qb%os0!jB}&Ir5aQz2tU3X;B2AR=`rWNCFACBsZLj!GvFE zI!hER%R*3S+=bKa+F=1^cC#_}q+yS}cIb!U+tYMOluJMPb1>M_D>P%2$8uF#@?+DL z!Dz`V_~@4SI8Bq44IYx7Pbnh3lW^$H<=Bd$eWUkWsb&7usB9kMNk@4#u~h(vrqF{7 zT3QBNZxWc^5M=&|CzZx8Ylb0VaP}8w%SFo?3{I$%S=ZS(n~9D_QZ?A{yfVfFoG0~R z*A&Vx;KSI|>~uG8SxU%r->t>2<8LyHhP^x~+}5+Bd0V2Mer9>kSr4iNFFbVaI;oBZ z?^1Bmdc7(I!&1LZ9i=d#SP;8Q^+Mg1uLSGYL3h#nyub>*B_3(z|U%DJ&Aj4c20M9_rnkGwi=L zSBnenz5~W7^n>8Vu}zWA)dG_3Q%IiCqXoE?aeLh5!bUgYDG}U4{)?Hf$`s$G+CYJi z6Do-U?}T##`xm(%GjEx%b+~XePrKp`R&%|i%X~|O-y+<+ur?E7CE?GsW2SZBC%_t0 zLei2P$$1#pk$4m}MfEF}V|rpUtW1&1EE4(V5w6@!?EakA*Y4vZ1WyNHCQ)j7$T^7H zz7mAp%HY4fqW-7mzXw%ag)-!4QZR26BOuc$$UL^K&HC|6V>s`0!siLi7Q^w&M&0#w z${MIh_QXMxLir#&Pv(P3u;{(<9*HEi3WUriQoaS(ldU)$WLL*qShYN8O6+zDsO7F7 z77+Igl?+@}Dkzz3p4u1wbS-7{HoRWZl5Ur6NoEdCsKqs=)xRaXnE7-b8kR>EcZXIW z2Hp`4q7>XBz1+u!LtXt&wLA%Z6mi1MK2zeG!+gU1UPovx^rzcC)`Ep}8o2(ZtiTi&52j9+=w}Nl`-1 z!0PSV(Q^zd+p$P4??6P}^=ZXtiWW2ri@C42Y=GoW&ISfl>B?N^ z8v{amu;z4+T~n+C_BE!#$!tW-GfS<1-impTiXzx@S254(_$`Si zx8L_^n5CefdUF|sPSn17J5fhZv&Fr8PB`1A_U6FFYGWb$uBk>a7--rm(9D}2zZpsu zlG721C?;Mstv)>;bC8tl_u^pws5w=SneV>_0s6&o{DYy%FNWhkM(zHmTF>8yf&c35 z-=l|%-CBhetPsf0_5Xt&D$409DXS_=DX7Y+NhvDIt4it08t6*ts_H81>*?sr>+9+N zL=P2zp@+ZF!(Zs(FZA#idiZZd5C7G@#P8?(9fbJ(|9|!Ne|>K{L81JmOsa-t00}!1 zM-IUa90PPKlhDeKtBRMpw_5SM@80DUD^3IZ$pZFTWmDNpC+6+? 
z;5A$%eqo{p@;$8XT@FzoDi{dHrpGL@MCntF?rjUKFS?qU{3d6-e7|8?bW23`OSx!N z!Vq9hMf`5SZb@apaWJ?P0Gzehti|7167SxgWmwXmZA=Z>j%`#gL|1)SpAA`$+CRLc z9YSchKYz|8D&;lHLk;`;DwcF#COLLTHlGmnzhLUiHn*4IvtYJS!HVT_8ID=L*CE4U zR6ft<&SYR`0*}|;jVis#jnf%X+WWr~QM;WSJQsy&nw)`TtILei1EBaw!+O)wTGRZG ztYfo0r)gh3+l33AYd2w-h(WBhVA?RapsF^E0KnFR)T#xJ;p4`jwCjY~l&O6|=sl0O zrsW%v9Hy7K^fijE9OGw&q&nC3oS{B6c-qQ2idlNPSSwgXG)8a{bXq24LKYL@Cz~aq zDIvqGUwf9FL#ESAn9=G_VkIo~MCxvswdZiF@wAb%FiS;?bTD7$Svzd_W(#iDcydz- zbDOuKHKStwD@$8;qr#Q?j&g@O8{^HGIqi#yJf9JtmZRsuW-LjdCv98h&5n7$?pn^9 zrxXV1h$ZaCr7*6rQqd8ND91tLESexZYU??^w++?++Ek?DMJ(>Z1d+V@ej&plbL1Ce z--c>mHNHt!qtaZD@vD(6uHw&llhOFeOo2iVa!hIY$u%g?k$5PGzQXYurggT zvyy~3pcfZljEhg$j)H;Azs?^J2E|nC)s9)o(`R$u6gRMNyw20B>u0Dj9dsR&Ptp?a zz~Jne0g3%BC!!Q(1kCL^Q%|;8#&n~TP$vVT*vd)gUyqlJe`iRgvq&2--mZB1P{h3{ zS0=1qERw&D=BVw%B+9rC3}*zQ?%>sB6}g+6H?aKxNNxKWO=fW;_Aene4q0A!ZnTh3S+ z_9$+AYhq+E(2VGH%*1xRxjgY+zWX^MY;AK#ESzP))_|sm{iv ztO^jD-WtZD=BnBk8yiU+dRXZUJhLO<=BiY7w~m>?YrAYcY@h3F|EbUKJ|_=lF*$jr z=g^1WOSJ?7I#7VH%vLf^88#cf6d$84|HUSXpZr7^uQltXa!Wtns%EnOgCL(*@oU@u zw7HuOM^tw&Q_U9VK}fZL8u1Ec5p=dGm-#RPrmtGsPH7dw3FE;kAH?tj#9q!Ed00X{ z{k7%^(X?5<8Roo~tRw!PkryAgeol6auE!C(q4l6#T!C`Da~TGRqY@|zKt5KyBOp!t z|DIP^CWhHL7Gm<+uI@YHD|P(}LS8=O~jEsP>jcz9=@F9^ob)4H8^$ zO}E49j5gPchM&v;Ihz*B8RhmC!GRi%ADy)K?c17ib!L~d9XRfPJ|0=NSp_~cq2w2# zGIMNJ;4KUmd;7)KbL)$&NQoIl*mkE2d(G%e$3cV9t;*$Wo>QqjZsGS|j-~j*zgG^u z>%IU%X~U|a9iW02wKW5t3XWeRu`8_DZ3W^LOr~48=YZ_}j@huB7yTz#3s^Z@a=$yY zb8-1;9E}rlKSDnl_Kl_j<$}T)$jsWTd-!eAuqkig zYa~o-5@NaYQh3o5(j*2^t;+oYxeC^}N*C(XmdH9*QWqMIqbK%rJLm&W*4Ik%=fSpU?p1 zA}_Yi6RRtP$j8=d46?Jev}&67%^*lNuxzjwJ1NJ;aUXS5VeBDy(o5os;9zFu9uz-P z9ytQ?XJPzN4gkwaflWI_y!!Ph>*D1xY4^J3M2`u-q6cE{IYOCI20m!U8r*6maqd%& z-NvGUvum4!%K3h)OjDyttzTDgPNhyNIXh!LF zq+TFFZYC>I8oUIgHZ|aO8TgLp9B}}WaoJEFOoHEcCEn89HJu2>Jm-|n`0IDLKX89g zU=jWFX|>6M2e}9b1CWoP?4S+D4+uZ zE|!a8Ee1DB`!LKE$6OXjr#n7>Rav|1&BB7l4kMjB*#(Ph5bLpnEV4Q?w=WL{~EFX0e&}QX-Z84%6Y-% zrv)d2Cs%$23y6P#>)+yEYI<_I$_BDJQc5~%22zS@ApWJJsH`KUs;8@lgm@3;+6sfBhTruV3uBU+lSG?73g;xxY&6e;)?_V$c0z&;4T0{bJAk zuVc^sGjsnxbIk8v2!CeZe`cRw4sU;eKaBq0h~fVa*w4@U`n}`(XX@_H9P__EV*5Av zkFafs9b!9X+6%;fey)Fu|H$gdDJ%SBJt^qPtAO~Af})g;lA?l?tdg#xp1z)}nvVWY z{72;%|LK>*o6ax%=NJC-3;+2y;y-_;=lW;%`PG~L!D!}p{?k7i&4B!uxe*FO6;YD{ z23_mKVAcD#qu4;@QYwry^#$A+;nqfg2E49r%%Dk_HhX(^=pOu<=qp3p=qEYhKO^Aa zp0Sh2fgcc@)#6E@Y}w=9-f-md4@eh^rej)t@eaws^IW~L)MTt?U(!1<{Tr9>N7V?2 z&d11SQW0qlq8<{tkf7SJHiyK{u!JSuB4>Zk4P88Ox+t)OFSo*@HlW^dQ@-k9P)6&~ z@11__BOKo=AnYJ3iwWhAo~c@C-15R!eTZ-8ooIx!wDLPRdGitqr*c?&188S;aW(Ur z*LWDt;WJr2r0kAjH)j8eK%9Bn>OrnT`HaN;fV^sKbA}VL$t6k-ig0?*ASZ+HaE`iZ z|MYNmCtVulFFVTX?p7W6M}=po*OMgkaRJGSFepOr0)9H~l*0O($wyp-O!)^gv_n4U zd@sjbKgG>|l78Fy)E{brP=97LPSL`6K|fRx7Jz7wtKtTLiAHEa!cVL?=<6Sit>WHL z4m-3p&eTJCbl{fCpJb)U0+<@GO@gL!*3}6~P z+RfkuYw|0S%`zvExr>?ZYfyCMzFD>XYPO`^jZAjdIY-TFmsO7A#WD^OzEY7{$Yq$N zBy|1SZIu5aI~bMMM=fQdcs7w7h{hFmrznTV8-$}o;|ULLDOF+dS@%$;fve|gxI!}Y zYV5*^45)TqlqcpmaxMphvj#<`>D_~7}X^I2FipibsQA?hCsI-lvby^p&BEE+A_~7G(Tf4iyF5 zjD{zZho3vlv*Ou6x>0&%Boujo6PABCgm=)P;Xf)95lxu#VBtZ(~JX5h?rJ0@A3wlE7XYCgKRn6LoF zjPV9X2b6VsG49#2X1ktj;(S}=R{vC5Xa2F>Fa=Bp5_=Xr)*9tz+#HOfrW{Kp8hN%J z15RO64R@{Xm`&G}y`3bf&&JHDujkOeJa!pk_!%qe>3d8;6oNF`fH#hi*!KxrKq`@* zqy%GWw!&$EEfm{Rym)-Gt5k}$eRAn|C5^>%M(y(%iaQRUpPb8gdwr^CmfdZJA_)2z zHhcpGr?i>)5fV$h0lA`80@d-n8CTz!8W0*;VCElnfyG*wVCsCq-OXp!;^v8}<3gEA z$NS7p$oH$kpUw+lJE3(|n;Cet8BiX+5(%H_n7izJZM&;yb>DvaTfJ?>so$<>R*G2e zu<-j1iKd)A5}5T}9MnTDQsUMJ;G76`8HJ;A}aKN(};_PmdELLpQ*w}U2lPew!3ZlrJCT`m9`FGbYC6Ccehxd;s9ohl*9B%5 z*yx-c_fk(oE1P}n$&-FO=ZVj#Idx*^$Ur-xya-7~U?`05$0$l0&;~|KFEd};tjCt{ 
zik3{*0yO_(eMW$$R&(LDO8f+8tE;g%~p?H-zqtK6^KrLe%&$4naf1T(WnPk(9J1PcztX`H~?@Be|AOv+uaA4$BcULdBL zV!icI>zbVC`ILj*TcbN>;9XK|A6%vV%&r;n?^(L8m2q|}CfCH?cb$A+|9)_X==${m zj!BlM_dZB^eE#zJJqLCzf6^b6)wX*&sXBGS<5LU`%74zj0H!z1KD4LMM6HllUc-%>J$h72{Ay)3h0ddJ)~xTTYR_L+&PYyJo>2RNNHUWK zX=Qg(_vOg_Ku&+$GB(z}x>3QaNSSeKys)_I)TYF=E9S?dI%@hREaJja3sYd7IAvE! zj>-eUqVIHcC)H0h?sLBRc4lb}H0l0u(XD%;L+tjv8h%sNQxdr6`-^}1g(k`0b9eql zZ>r`0cxVX{*RCUr?DH>+?{P4apkK>2*TpeV>cQG#>#3cuwJgFrUtybrlxK} zhbcQMbbVz*Lx%b)hK7Nf2J79mmA~ii{GPk>d+g@-*v;>;o8MzMf4Xu1=T`dHIMV;C zCn3MTqw)JY8o$4z@vGJSZ~4FPMgF;#`#tjgFXZ!g_afQzOTFg_2*Tno_afdtbTw6D zeFI|?Wx9reiV1v2L!Uv{TxD!bS2ogAHeRV|sA*`d2KORrn!oQwe&37yz8Cp@FY^0d zV?cx3hWti*rX=Kf!`SQG^JoxlGp?S%Z(V zz_WuZ$zOMx74WuRYk=H&CiV^jn$BBI_nMm{^019Z&00_dC#`c#OG!66D zZPw1tvC2B|xJ-@oZSdl|9UL5(JP3+K5ee^0daMMd#ui3*3>?D#`nK&xe;?cNEfJnr z1q5O7H@#%Ws+EQ-VTB49tB1F38EPu-{BdI2>-Trh$kZOdAPRJ4fs0x{*?AaW7i|$Iz~@*)~*&7MZR=a z4W;Lvu8>-ImRfW;cBt@0L26`jg-}7%#`~3*UhS}y%|AW5Un(VoPzPOBj471PlQDTc(~tQk!=Y&Jh^wiPK6m>os$yeUDIx%*g2r+eriK z*sB8$dRN~>xgF8?@OZ*-P_kOVp7PrNeSNXSil{9wC8nx&mglQo!#WsO*n0UtJX$S# zX#)Mi4=XmYR&B!ZPutdiijWiyT0bOePyVJdDSqeT-qVhw+jm8U<;>VN`g8@I=W`mQ zRV(VRKGb%|Uh4V0@It57!aeV7n6%N=Nx0ke=2&MZXAHgjRcHOa8$J4b zp2MNTPMR$??s9RfmMzWSQI|Z}x#Q%Eg%udSV-8zP`6}^#Sxt?XnVcQ+wS|6?ovHcn zdv|zO{Fh?zd5h|18MfGOJy=9 zm;FE_(nyV)Xq}0!#mSWvdU>OvpRDWQhdNe`ye>T6i{iH zWK3?~cgoTusX)kBY2-W}G103jS|v-eeICrcAgzAZ~CqtN`=0zYMx|fm_nuE z8nda@Qex`k3?aMM^f%i?^m;eNoheb+wJSAdsq4O7W;On2oz7G>GqdX%pUxPY(%OUc4=zW*|CC{`v z3T&U)-`kQtwBqwsvF++9&I9kRm0lbdD7!s!)vLo@YGpvvQIfYn?^a4(LTm`H{g!6b zeue+2<;AVc>x0`o96Qu(S__T}j~9$DvnjhPcgMpsdEe&9k=~_8Z(NVMbXE_2t1#C2 z_39v3N0qy6gWy5`T;m5X)v|>g!$#cb3LEdg7UrUD)NYS2qhO;4)>H0Jhn9tCKN;)X zdgb+jZ&p1{0aVAf`;lczZ~HUOTUVr;nRGwGJ2okJlhwgg16UU8fa(x z+##`~hQXtH7hUqJnpEA7<%Wj%E#F7|{7AE9Uho4{C}YszzU3(TkLT|L|SZcm4ATC=Oho8^sQD{EK*ZKH*(E74_g$wP4MGq#v zpPYWRzj)U6xu*a^=x11tv2xvaT~t8*0il})3oYkwtCtHY?j__SN&cVp(-`Q*AEpF%IpNJpKc54UUvhZ7%qMkhl9^tx-4J5~E0W_mDmF z%<=MT_vcICYgMXBPd4lAp7q{7wa{!&anDNZwHIYBdm3@bXL>2^K7r?~AU5V2x%b>i z$ZB2gZ$=%TvfOu=P3E5V<>U`@6EF>u3_KKoIpv(W!De4;D-QBV|O1X+^f+70uhr}qiD`q#-g^wl-pQc`zTRIIN7^{1rbI9%JkSES? 
z1<^n|i9>2;TPq~RWxIou-Fk`t)+%-FQbW01%+I9)1@Z~u7NS~Fm zFkKe?#{wRrEgQXqcJy2%@=oDRCBH`0XHR}+YfXbi$N0liwdk3^%owhM zVXXS3Ovo~?m1XK%xFma4amez1HcyQy*SN90;mE$#kNK`HuUQcw8QJmJFSVd2E2fYu z*nXliM%9wX-)-GXTZ71>`_`A)CV!QoYk6K7dwwztWMok?Db)Y&d?e#K)4Ja!i#e00t7O*tA-?mqB2EeYeJ85iB&ckK;p zuJ%gdQ9doUZj`q)()hJ#cun-BslXkTu`OF)zZ%mB7StAs-*0uh{#t~^(R&6TCv#2( z7arR~7Y(V6&i1ZucqSnoH}L%Q0@ZCnjTtkkR9jY`St^jUx9CE6lJC-sq|QU7E>~U# z7wE+v7Hnvenrgh;&%KVLWNgKx?#2?+7q-k$kJFU3!?ljrPkP_gXN`ur&CfSRJ-rqiA-CC$1L~Y5*3HolPnQ`Rw)nq@v&9Q4gR2AzdS2YF17EbzE zV$3~Z`69k@$;?MJTl>Sz6>VgMjz$0y#mv1VvlHEC#!@z&0PVTaA z&<=Js5zXYVoO>_sWj-5&1l`YEcwYALta#2wduHJH2b+3#-y;K`bjF>gMU1wjj=wcE z=q3wt#Iw6u1+h=A7RzRC@@C%|$LV;=ue(;aQ~2w}_t$wH z;Oz2Al<$yuQ=8S}!U*|#@es#H&rI;SVw1KZzD*kST(13bY;Whn&(oy0l$TW|60|3- z1-J`5=47uyzV`2lZ-_+R9OHdwC(SG#z-#YL+kb_(^tgGO?=HpH<3XCcs0&SPiU%&s zc#cc%bL!n5HKS>1+=g{_;%(pP{p-b7Z+Wo(eN-HutF3h-X=x|@#s2o^ zh0+W&SVNlhsBTE7_ERe62lA+p1J(-pEMqScv>j|;@CJCzVkY7TlC^sOyi(N%Fv#oR4j&5D>)Xmno2i0#gYAbs z%WEy~%1*{wp1pGR_CRig?J>TJHf_TjV^d?B%$<@GZiaLE4@L?k>K95Gy(x&0og?kO zG)Wj(ZO`p*U%Q}?vbMFrJ$T-rewn+ty1|x_0jWggd6~l|v4-*QXYU@0S9l96?C4Mb zKswYHOhPn0!F)}b2Em$a4pEZd$!Fgdj@onlK;9%WcPvy3=*~$gw7C3d#`{l`N}(a{ zsLUUF-Qn{72eQk$RAByLSzm1M6frKQZ~4$dXT#3S)+u+pYyDOniQnAjU6@BzFLx#lilT@W^&xO$TCG{^<@2VB>uBkZAYkPS@k#~HU z$aVK??2Kr2m8~{U+L9s5&@bQDG!>=X9^0ZJI@Bi-al;(@gkaKH(|t9a8XcX@eV+d@ zeYQGQaDm+9z~jlYKe+M3XEgV^|LowGh_Lajk_qnIBRBN6^sdV)%QGGBT?J2elzmMRxaj@v{_fo!t_@~i*Y}U44_xgp<*9?E#OI!+ z-U{A7R`0a%xXn|yMbxUqO?`UHYZH-Ibq2dtqNRq-k7Py+J&&jmHkFz96t32&qQY*u ztadIjMJ98QK6U8I$`{M{&==qSS$pZIv13e{oE_r9xt2YQzjtkSU7(R-it>7QhN(cd z$DLfhC$ZOfdra2KM9vrgZ7UOFdORJ~z5kxsKK$ff*Q2Pmo0zE_T>{D9$$9CY`=Q`X(0E;eV>l?2aSyuMW9RK*ewZA5{}+%fp+v{RdVVI$WL^KBnC zO7r?(VZ)xz3rmm0DM-mBT&ITd`5owJmE_en9LOd6U)a&KX(0LfAx*1X*|KAoW$yVt z-did=8Ts{o?c9dsN)hc%gIu-)w9^CcdsePHZjrp2zUGeI;OkG@-tM@WYW!L;E8Gv{|E$sd>AgN^bWj6?mq{)k zWwo#ZXX}A&W$cpAijQfGZTIfFlPX?ken@Cni|4NQ=gHA`ycny*Y}JJ&YTQ|dm-xPLNb<%XwbY!cGy(L;fEpS(0x<&k`p z_|Vt>RGpbAE@>^7gjs6*)l7U>Xo+&Q2g6I}&ZNh^!)b40j$}Pd?0!rWXr`SwG3_C@ z&Z=TnXHI|7Dk@na@#T_+1&J4)+WNPd7cC>7J<}+Gu5ErQZ#~7s>yZEY-PnnhvC4-M z(f$hImuj~yT(#E@ybcOh%l(v7e!jyZSuMG>zg_{Q%{Kuz>pu{@wr%J2=sE8`|C*Y2)Y7SMFJpH( zb*CQ^eUz5)3?rd6ZJ&Mx6mP^RN?3;y}N{9<=}@HwZy zTkVXMf0T9M-6L_UG6X5tjO*M}6iT8`Vg+O}OVIO54U|c_ZK|vqbmlV z$9?EmRHl=ymU5~oCw@$1&G_|#iLYB)>gKlxT-|f<(K;!!_`omxA9Rq(wc%ebi*(kS zwcS=yr0^#U6`z-Nh-tZ-WAP&4_=^kOlYVJso)I2*PB%QV39c$h<9X_CTP>md@=Ne| zRHMDv>IlOEx6Z(;*tkYC!NcHkIZTnU!47g>129aklH?1T25zA zM9RSwX&0w}Gd=J&6=sxpRnv(x=;ddQaEHl;$j{=s_1^wr>*de``tC}%8R z`ctw-Lgb3-)gex%RsB5D6PXv&8Yi=MzASLN?9os+S6{wGqwLJwhK<#}?1?V|i1$>N z(oFWnr`M!#ebXZ$_N~Vx{flPnH|}5A#r!BEf3E!g(O}D1#og=LJ_u^c2JC+=JKU)9 zY>=nRPNaBpZ`RNn+xqPvqYho|;*#inBIT=Ao^Lk!SUbA!?bH(l?MS93`t$c|r06J3 zZ2ovLyL7J^-ro$o~Of~nHU$`8D|e;^r@2Lk`$2X|lo_0c1igk*5)~~)$N#7OP7gmu~OCD=Ne>&Ri&5m&C5bcJ+;qhk|`&(|ju2EbX2Guk{ zS}~8qx%|XQhQ^bSD;g3Uo9YAXpl`*DGEa~_Ub`jERfgtvNc};U!JQH1omAR-?Ac_7 z|1znt{*Ys*CfA-AnDjfYbNq&E6yN+-ts8RNgtU}NIa#E%fl!WgtjHAa{F+ziTKhf( zipuYiE(#@JXgLS9KaldX#nbkL93diug?*ip_N|$vJGdlGOX{wENH1*MdS&qSMNS_E zo;TsXSaSEZ-C`YgyWB+V9t?e#+|vBfsr+7P%cnPy{^EOLI>rV3<)5sR=@yy%bYelY zfO)n5b-8)Q^MUy&&%*uNWklC3UE!$w?!t+g3RCA28DjHCmpO$u4o(mHYASi|Xs`Aw z?qHnm6%EecVT{Y~?CPp{Cf=?YU#$4#`sv-XdWd&C$ui1*O6gtdj0C&~_<7Xb-sWuu zc_J&aTw`zV(ie2!CCKI7^BYzuzC3d#o~yrK>s!w1SRVeVhWfTW%|0&=>M!&*Gj(T{ z6y@dzfl`oeleC3#z!>BZi~%eN1g)HqIPH44KaTrdnQA7_@&}i5tV-i#-q0cm_GkTN zV>!~Y?fE{bzuWj~#qeyY#1m!R%YG8PES>Xwq-Q?wdG6Bk;6aH8nLNA?uJ=4uIk(aF zKEd@$OcAr8we%7n$FM=844sNO#(rh`uO zUJ>Xso+NJXst@He7r3kvv$kiW>|NCp%LfjtP?44O0hQrj-qS6I1Wzvr?C`%~(#d}D 
zdT(Oc(Gw1r&2xDhF1Z_72NgXbrqfEL=Ax0n4GYSx+QAdE29qWU-kyiWI*XF?A7rPT z*lB!}Vkxo`Co6mFaUwc&{b=RD#pi`g&EvBfrZT!An&0uT?A|A?x9!u=(b~_98ZT;R zoE|8wKU*%n`LV5nC!cdsQVH9k`y9e-6=#@7Zf~>OwttI2QiHk-BPdo>jO-nD^advD z|E@u||FuDbZ8y}~xf||mC1+kLq?`!8mZsY)`K&BB)3l}N z!h49h1SXq3$J_0fuHahAO+4K_vML`W54NvP_gR|c#IRT?6+iK8r~5}P^Y6z8rp2O< zak;24#$R!#XH|SF3)9&wy3YK0QUmIplrV$s<6IEh`E*u8N4Qls(M0j%jKI^UPrbG# zo|nC1s-3h`=) zj?$^jI7KUXpS>aJdzMGja4o0rSi}CgB>RlsKbiLs8gq;J6`{gpI~57bqLuc^@J6<; zeMIi0=%8GC&H}}L$$G^rGnjfVj~iC6>u1ZK_XTY0MlCS=a-}Ox+rJ!G5WjXa-cCH~ zN~KD@|E2aVhq)>S`4Y;U&n%g1=BV!Im7CbT-%Ry^?f~Pp$0?OmtHSA`bCrSB)@+x| z>vkV*lB8{}*iP3r?mw<`!DEQIp`~D5=ZW>+F8M}p98Z{s+hW7?9ZJL;Pd%Rdf$W^L zt<@#%pV`iFT8&E``bQh3+}-cUMy}dNtL8|KHdNnvvfr2LtM>JHe;;$X#)n$Z`R(6~ zTFPw)cj;Qba8GMdJvDgVy}a?r4ZF`@6Y^`uYQZT#2D0T1G!+{>7;_{m&Q~Tu;vv$5^W z74zAsC%HFB@&}T*e!z1c3##hviCp7cb>>Q`lzvz(T$UQsx#n~=mUkic68WJ1T*tL$ zbaA~N)|x&4b2$FIBIDoR052VmYro$YRkY$}JuWIwUs~fS`$0nmZk>$s%e2Djr50(I zejqPOejtNyQOi^1+Cu4V^yA!DpKX|In9Y8wR3RROZ5w>q^7*?6LEDh8Xm(a$!Dz{| zdyhvdzFzH;T58|B&+l+dkBM7Cgf;W#zJPra5!uWjSCt0xWhMF@osJP&%qULfkqp`v za-PHSu~cZizWUY4p3yU~?1X&6+4yU2&VJ&iyN8`Sw^*|c1#~j1`Vkk*qhKdk&K%JZ z3Q{X8pYS{X{ru|pT5Y&OpLHMGm#eLRG_?E<-!+C*Z*&@g|M;Fe-AA*(XZ1hRZ9Ne1 zZn}O!ZD3A4d(vslU4zvR%&kqC3BA64lK)JtBed${v21$g25DlcVcqSPGQxrm1I zcP^M~?3yT0e{}QAk=4aX2fIds796ZZx$X_i9VUJ3dc@J9?s{#fbgye8HY%xTU06c= z!Gn!&{d2Es|H&{0)eodhA8z6!&Uvo#S7d$Ps*5K);j|;nv_b35<1XQ@XS8 zLcCyoO*OG0&VXS#<~FQEhGk(U_CO)UC!lN!HFR5}P+RT6~8#L!TTb~6Q*s*!-d8znhW3TG1$HOrE4m=x<+cWm3@>eLD9}#suwsgIl!zo#w0lJ^5aGT?6 zQ@vYv0*HqE?@BsuF*)ltG4D6La3d8beor;XD=H}|$-AEy^YO%Vd6?8;KI4ehcS}3* z&z568R$}cgKIf?u8qQd{k|`F~+_A&Xe4=xD$<9|Q^Hu9~xp}7f->V`Ujrsmv&_6%X zeg2@;r)OQZeNEz-XBwm-wY3Yp!92mwui4Kwd4ked;lE=SGir#3YrB6%qv^!(<;*nu z5AVEv56X55hlNWcobEg0*G$H*+~Eh`0?GSCesDeaq?}cTf%Vit_=}d3i1g~}J5JLx zfs;j9{p&r$w=bj*H0?IdJM-9Gd?ChLHqvm_TH1v~9u>oc2W+!c(qR6FVCVZyE6tBe zS{6*~RQFo@rhqUoAIcl|blQ(^;YC;R)31}!9@S$wBtYpz<=$Z7wHaDL=1H}7Qf&@x zYNC~eUG`(!514X|qB*izv-gJ z%08~@=N>8LiRcw-*H*|dcN^q<Q^jqIs z?doye)cI(J}-e-cMltM|eaE&i87V;VXV6WuA4WTn-vV|(}d^*(j(DZi(_HE$LDnRk=_ z<(ZRNZza?_bjbckYC6@6l_EZmt7N?>xVTPYP{DoEcv`^3`a>c)^cjAeWpd{}SIHta z6P2~kQRdFvNS{8zg2xQSWu-6g+>zSDDa}^lKD7K1h3m;-%I# ztixT|RPEhVCWa$t*mvH192EGe`qPQIl-GOO3o5w`2bPz=OUP-;f$rk)Zi#@c&S(AN zuaO2~8Hc(t9p9t^W|YV~N;SEIV2L!gp|67#$8&k(s_bMUjn8W5+{6ZbZy2z9bup4{ zh%5G30W$UfZTi8XWF$1{lVnkV3+rEYB66PHdnaUms?k~G<*0i!)Ox`rl@9xNuP>_~Rr3p^W&|l|edu6(6P`GF^GdtZ ztkdi|#VI|Z&Z>E*-p#MidB~nBUUEE%^-YtS8_aEBYTD2KKtc`%Px1x-eFQogGn^9g z!amh9k7+!xf+ll@cGxrc`prZ}fTyg{Qz6jRj~C=;bHh6YYb|+#_O#O$gtosc^2>K= zx6}@Fs!KSp)8d8Estvs_#uiW?nDtFQ?qup`x~n~t13I|8yVxTJEIIvmH-VXVGcSBm z{vjZB?@6h?V$GgBlS4_-pPLZRmVaMSvlKh}|LWDv(TCMaNw~-tv z1YrT|I3uPWdz}51j68Syx;uOLD7qeYL~ckS&4(v8sNqqH3V=ZooX`VKX-*=Jh{2$| zWQ-6ZFVr)cj`jk5z~JX}v>*HdfP%xbJdt~x#5Y7~oU@0$!CEaG9#ulj2;8r%PzS^x z4MEXJw1!B;GgOq-6_u6cDTIAq&ig#QTpc|`um>D{Jfu0y96fv;y#l21CeBV?IO0y6 zxue%^7ipaSK`++>(in5c0BNk5uZJ_r?s6bNnqA*_pRc!%G+~qTK_6##Cuc8d4$Iv> zo+yEYxA62MEFs~IoOk1hh&-)L!c5_c0p3Gt6>+B7(CfGXf@gQ)L@is2LUO0XdyNQ=8DlUuP z-b@bWJO0pf5 zLH+?7oY?fu_p@1-vOEm}22KXa(Ln6wl;_X8{1Q#FX ztwV_6u}7R6yV6UM8^NpkxbTW07Z{qzqJzDFDrzD>U2+D>wowJeAq-$8NLlq3%Jg`j8a_W0{cv6i#-CaFgYfz3oist5u0Rc~7 zi)wCeP7P>9@q19BGHi9C+{W-nqweA2l^sN}>$Ke6$50NGgHOxbdkQ6R5y{lRIh4dt zns5&c3=CsK6(iYDS|a!tz|UYq6>hPi^n5ndxQq>zt6{?ohTMI^hE*6mb)}UJW8rAg z$wrW&$;c=x%E-tJvSDP5mB-lF7h~k6*zn5GUTV^6uFbJw4AxqNVF*Nw{$?Wzo4zcL zbjrYb1BK06HVi`~VqoEQ3OEqpgn-313f9n~=6V<^DrUUdnt~Y&%fYD%tu?Tsu+?0J zt##`m`MUK4eZ1+0P3ThO7ET0fYJppcl;SLe%&g$&W>#Dl`1RJd=yK#HZn=>8RtpNo zd@I2~$YMME)M7h<3vac~3SEZe;g(@-Y*7)!h9Dxec@ym1ya{%0u`q(2**I6~R(%r+ 
z*2K_6AE#!s)e!a@8$vd@iI}aHCKL{n!H`^>1PyY%)e6!Xnpm0O+9x(|-MSUBdQAjW z3Vp4CG!DyV8I4#o57ZNuS(5Jb-(2}N-vA!@WKmj7$Pbvgb)bYBA|A$GjL)C+n@>S1e{bVqKs%FdWba|G@+Ak2q#)+cn}FgG5A3f zI8GXh+(6N0^auR$yh7RfQDh9|$CngTzNu`C%Dj(~qe{Cjq|`*cT}8vAb%DnaZ22^L zjGSt=c=){#PBjD9rsHe*s|WW}ukI!1R!5X{zACs`hqhtlHjyb$F?iyNAks-8X#)CV zqMSfRg;4n%%$c|8=sd{g8}J=i0Db@=5FZMJ0mp$8z)2t+hyYFjr-4Wy3Wx?`fLI_7 zhzAmYL?8)hhhugCuYlJ;C-4QB0cHUv@D-Q?x`1w=2j~TcfKgx=7yefhwRHr~w%4Jgh%5v6)|l>t~yBzlBJ<3T?0!!lUA$bHTcD2oj=8 z_hkm_s-Z;0fgmC)&b~!gP7EA6ic=Ipl<$R6bmj8Fq34BB7$S)5hO`Gda%SMrQKF(M z;^>CRfjWlNULvgbgSf(IBd>CF(YPxP``wT9SvKiCD*)C+5`?alvu1P=hyH- zhhBI?i*X52rm3INlEbgL0Ea^mIKHx(>xhn=T-;Gz`r*_2SrNuj1#9G9UfZWj4}NV4 z5$Hpz+;E$MF5Nzz8xARERQ*ba-cZoUz@p0Ud+@q)=f4E%(%GFZvwqxITdgCvY1uYi z`nqM?AVM`bW^|2Q|9KT%dNTijmZB!A{AKp~YB}BfK^^*<^dU$hRZo@sy6%uJeZc-U z!vevHhzZiS>(Z^l*TGKeyP{*da)-4Qbq&%`>Se6AuG}}{H#!C)2+l(6Y_WhYJu#XK zaw2t?D%Z*W2r_o)62k>iZAkUdm8&BYb?9Mtb=D|0!z@kUL>yl>-5H-$4 zY^5W=E}d(_41NKpjnF!&L*G^@#VW^c8yj6Y&)m8-^t#96j4(twE?!VqE{uEU8hXj$ zOkNS?4E8}1(T=hck5FWZjzZQ!YzV*9jMO}$%eB0|4n1E9>W2# z-p*#=fY7Kh;i__Dle0SXOBwtR6CvK36>n|C2HVQ|2ao8=J$UD&OFySzs$v2=q=+QB zm^VupTUh(84Hna?E~agH168V_1q!ij)B25Ctb@-QLq0Q7?AFk2d^XeU91tOjnaDaa zn~1)S7*h=@%H&sllnGkf4v*+M2wcP?$=s57YO*1N4bP*8nxQo1!h!-FxrBJKF8yw< z5=tY5=sKX{8w}*x_HRIx-@SFwk-M594cSW(G~0j}2(W>Rkh#v0M@OJ0p-CkAgYt5f zLGg12-6-theS@y0%1uvA=|H^p;xDlK;XqlLOLgT!Lqc@uFWYP&S~*pGgRa~YwWT`r zLB&~=%1V=bmS0!y^r=%i^sdfMlqMB~6;V3L=J>%NpPdX!y1)Px#p_XV>1495GOCFL zsvw9eszOTBqhhb9AOt78K9vX?I45W*I2A;Xkc6lot3!l#9hBFl^3bIoE{C77S>w0@ zguguq*F;bioF>i{)g&=rF4qZ!-Vt(Los3Y$=m7IoY9f3q?+NHrC7{m) zmM269Y$*3BBA(?%{q$mgdI>+hGe5l)A{mTV=v&HT5)ww>K#l_^fRh9cZ1`KsFiDK1 zxuK7Lion60Pb9%sB!L@=g3x{9atN^$xB(6y3lVih5^Tl8?gThOB7sAQrNzx42dg<{ zr`TIjA`Z{e=^RK0?0*HkhO=~z;!Y!9ATR@mm?n}KI6M;$@fCK>jpA~UE{JC1c;&$UCfuXI1LqHB;9cB*f`Ei^Kau))%z#v3~e#D6( zsc?eRi)kAmZ4d1F1iMat!pU==Ta120;J(YU3L>)V>gomzWri``WR-ydUDZ%um2RME zz@Wn_6b8y_`bI{Y#)k6HcQ}PG2plv`=*;6;ro-~*Nb6qIw~sQiH}u-7+%TR=8&1DF5-Oah;P`w-3nZUc9K zT;MK{2jm0yfC8WpC<2Os5}*_)1ImF1Km|Zi{#iZ*5wet*&3YFeh_I>xdU~;Ycm@#x zSxFN(@T}``8_xi@id9F^TE}MO-N{JwH6DbD*2!+i;aS;oK=u=~5HI*qAYA=0lz``! 
[GIT binary patch: base85-encoded literal data for deleted binary files omitted]