From 3ebb00935f3f0522b75df49c2769ab1774b91380 Mon Sep 17 00:00:00 2001 From: Jhen-Jie Hong Date: Tue, 15 Aug 2023 06:14:14 +0800 Subject: [PATCH 01/71] server : add missing /json-schema-to-grammar.mjs (#2616) fixes #2611 --- examples/server/server.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 222dbcb43..99660455a 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -15,6 +15,7 @@ #include "index.html.hpp" #include "index.js.hpp" #include "completion.js.hpp" +#include "json-schema-to-grammar.mjs.hpp" #ifndef SERVER_VERBOSE #define SERVER_VERBOSE 1 @@ -1218,6 +1219,12 @@ int main(int argc, char **argv) res.set_content(reinterpret_cast(&completion_js), completion_js_len, "application/javascript"); return false; }); + // this is only called if no index.html is found in the public --path + svr.Get("/json-schema-to-grammar.mjs", [](const Request &, Response &res) + { + res.set_content(reinterpret_cast(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript"); + return false; }); + svr.Post("/completion", [&llama](const Request &req, Response &res) { auto lock = llama.lock(); From b5ffb2849d23afe73647f68eec7b68187af09be6 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 10:04:58 +0300 Subject: [PATCH 02/71] scripts : add helper script to get wikitext --- scripts/get-wikitext-2.sh | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 scripts/get-wikitext-2.sh diff --git a/scripts/get-wikitext-2.sh b/scripts/get-wikitext-2.sh new file mode 100644 index 000000000..98aec3e3e --- /dev/null +++ b/scripts/get-wikitext-2.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip From bf83bff6742c0f1795b4c18695a13a34ac7adf62 Mon Sep 17 00:00:00 2001 From: Shouzheng Liu Date: Wed, 16 Aug 2023 16:07:04 -0400 Subject: [PATCH 03/71] metal : matrix-matrix multiplication kernel 
(#2615) * metal: matrix-matrix multiplication kernel This commit removes MPS and uses custom matrix-matrix multiplication kernels for all quantization types. This commit also adds grouped-query attention to support llama2 70B. * metal: fix performance degradation from gqa Integers are slow on the GPU, and 64-bit divides are extremely slow. In the context of GQA, we introduce a 64-bit divide that cannot be optimized out by the compiler, which results in a decrease of ~8% in inference performance. This commit fixes that issue by calculating a part of the offset with a 32-bit divide. Naturally, this limits the size of a single matrix to ~4GB. However, this limitation should suffice for the near future. * metal: fix bugs for GQA and perplexity test. I mixed up ne02 and nb02 in previous commit. --- CMakeLists.txt | 2 - Makefile | 2 +- flake.nix | 2 - ggml-metal.m | 171 +++------ ggml-metal.metal | 969 +++++++++++++++++++++++------------------------ llama.cpp | 18 +- 6 files changed, 528 insertions(+), 636 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dff4942cd..01b40c2e8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -296,7 +296,6 @@ if (LLAMA_METAL) find_library(FOUNDATION_LIBRARY Foundation REQUIRED) find_library(METAL_FRAMEWORK Metal REQUIRED) find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - find_library(METALPERFORMANCE_FRAMEWORK MetalPerformanceShaders REQUIRED) set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h) @@ -313,7 +312,6 @@ if (LLAMA_METAL) ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK} - ${METALPERFORMANCE_FRAMEWORK} ) endif() diff --git a/Makefile b/Makefile index 070ae1242..5b801d16f 100644 --- a/Makefile +++ b/Makefile @@ -283,7 +283,7 @@ endif # LLAMA_CLBLAST ifdef LLAMA_METAL CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG CXXFLAGS += -DGGML_USE_METAL - LDFLAGS += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders + LDFLAGS += -framework Foundation -framework Metal 
-framework MetalKit OBJS += ggml-metal.o endif # LLAMA_METAL diff --git a/flake.nix b/flake.nix index 4178e97ff..616b90252 100644 --- a/flake.nix +++ b/flake.nix @@ -14,8 +14,6 @@ with pkgs.darwin.apple_sdk_11_0.frameworks; [ Accelerate MetalKit - MetalPerformanceShaders - MetalPerformanceShadersGraph ] else if isAarch32 && isDarwin then with pkgs.darwin.apple_sdk.frameworks; [ diff --git a/ggml-metal.m b/ggml-metal.m index fbac21e3a..e13cb4b3c 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -5,7 +5,6 @@ #import #import -#import #undef MIN #undef MAX @@ -79,6 +78,14 @@ struct ggml_metal_context { GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32); GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32); GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32); + GGML_METAL_DECL_KERNEL(mul_mm_f16_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32); + GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32); GGML_METAL_DECL_KERNEL(rope); GGML_METAL_DECL_KERNEL(alibi_f32); GGML_METAL_DECL_KERNEL(cpy_f32_f16); @@ -110,13 +117,6 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { ctx->n_buffers = 0; ctx->concur_list_len = 0; - // determine if we can use MPS - if (MPSSupportsMTLDevice(ctx->device)) { - fprintf(stderr, "%s: using MPS\n", __func__); - } else { - fprintf(stderr, "%s: not using MPS\n", __func__); - GGML_ASSERT(false && "MPS not supported"); - } #if 0 // compile from source string and show compile log @@ -196,6 +196,14 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32); GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32); GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32); + GGML_METAL_ADD_KERNEL(mul_mm_f16_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32); 
+ GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32); + GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32); GGML_METAL_ADD_KERNEL(rope); GGML_METAL_ADD_KERNEL(alibi_f32); GGML_METAL_ADD_KERNEL(cpy_f32_f16); @@ -506,7 +514,7 @@ void ggml_metal_graph_compute( id command_buffer = command_buffers[cb_idx]; - id encoder = nil; + id encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; const int node_start = (cb_idx + 0) * n_nodes_per_cb; const int node_end = (cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb; @@ -515,10 +523,6 @@ void ggml_metal_graph_compute( const int i = has_concur ? ctx->concur_list[ind] : ind; if (i == -1) { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - continue; - } [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers]; continue; } @@ -592,10 +596,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_ADD: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - if (ggml_nelements(src1) == ne10) { // src1 is a row [encoder setComputePipelineState:ctx->pipeline_add_row]; @@ -613,10 +613,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_MUL: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - if (ggml_nelements(src1) == ne10) { // src1 is a row [encoder setComputePipelineState:ctx->pipeline_mul_row]; @@ -634,10 +630,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_SCALE: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const float scale = *(const float *) src1->data; [encoder setComputePipelineState:ctx->pipeline_scale]; @@ -653,10 +645,6 @@ void ggml_metal_graph_compute( switch (ggml_get_unary_op(gf->nodes[i])) { case GGML_UNARY_OP_SILU: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - [encoder 
setComputePipelineState:ctx->pipeline_silu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; @@ -667,10 +655,6 @@ void ggml_metal_graph_compute( } break; case GGML_UNARY_OP_RELU: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - [encoder setComputePipelineState:ctx->pipeline_relu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; @@ -681,10 +665,6 @@ void ggml_metal_graph_compute( } break; case GGML_UNARY_OP_GELU: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - [encoder setComputePipelineState:ctx->pipeline_gelu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; @@ -701,10 +681,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_SOFT_MAX: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const int nth = 32; [encoder setComputePipelineState:ctx->pipeline_soft_max]; @@ -719,10 +695,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_DIAG_MASK_INF: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const int n_past = ((int32_t *)(dst->op_params))[0]; [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf]; @@ -740,53 +712,43 @@ void ggml_metal_graph_compute( GGML_ASSERT(ne00 == ne10); // GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere + uint gqa = ne12/ne02; GGML_ASSERT(ne03 == ne13); + // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs + // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && - (src0t == GGML_TYPE_F32 || src0t == GGML_TYPE_F16) && ne11 > 1) { - - if (encoder != 
nil) { - [encoder endEncoding]; - encoder = nil; + src1t == GGML_TYPE_F32 && + [ctx->device supportsFamily:MTLGPUFamilyApple7] && + ne00%32 == 0 && + ne11 > 1) { + switch (src0->type) { + case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break; + case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break; + case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break; + case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break; + case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break; + case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break; + case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break; + case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break; + default: GGML_ASSERT(false && "MUL MAT-MAT not implemented"); + } + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3]; + [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; + [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5]; + [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6]; + [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7]; + [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:8]; + [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:9]; + [encoder setBytes:&gqa length:sizeof(gqa) atIndex:10]; + [encoder setThreadgroupMemoryLength:8192 atIndex:0]; + [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63) / 64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)]; } - - MPSDataType src0dt = src0t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16; - MPSDataType src1dt = src1t == GGML_TYPE_F32 ? 
MPSDataTypeFloat32 : MPSDataTypeFloat16; - - // for F32 x F32 we use MPS - MPSMatrixDescriptor * desc0 = [MPSMatrixDescriptor - matrixDescriptorWithRows:ne01 columns:ne00 rowBytes:src0->nb[1] dataType:src0dt]; - - MPSMatrixDescriptor * desc1 = [MPSMatrixDescriptor - matrixDescriptorWithRows:ne11 columns:ne10 rowBytes:src1->nb[1] dataType:src1dt]; - - MPSMatrixDescriptor * desc = [MPSMatrixDescriptor - matrixDescriptorWithRows:ne1 columns:ne0 rowBytes:dst->nb[1] dataType:MPSDataTypeFloat32]; - - MPSMatrixMultiplication * mul = [[MPSMatrixMultiplication alloc] - initWithDevice:ctx->device transposeLeft:false transposeRight:true - resultRows:ne11 resultColumns:ne01 interiorColumns:ne00 alpha:1.0 beta:0.0]; - - // we need to do ne12 multiplications - // TODO: is there a way to do this in parallel - currently very slow .. - // TODO: might be possible to offload part of the computation to ANE using Accelerate's CBLAS - for (int64_t i02 = 0; i02 < ne12; ++i02) { - size_t offs_src0_cur = offs_src0 + i02/(ne12/ne02)*nb02; // gqa not used for now - size_t offs_src1_cur = offs_src1 + i02*nb12; - size_t offs_dst_cur = offs_dst + i02*nb2; - - MPSMatrix * mat_src0 = [[MPSMatrix alloc] initWithBuffer:id_src0 offset:offs_src0_cur descriptor:desc0]; - MPSMatrix * mat_src1 = [[MPSMatrix alloc] initWithBuffer:id_src1 offset:offs_src1_cur descriptor:desc1]; - MPSMatrix * mat_dst = [[MPSMatrix alloc] initWithBuffer:id_dst offset:offs_dst_cur descriptor:desc ]; - - [mul encodeToCommandBuffer:command_buffer leftMatrix:mat_src1 rightMatrix:mat_src0 resultMatrix:mat_dst]; - } - } else { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - + else { int nth0 = 32; int nth1 = 1; @@ -885,23 +847,24 @@ void ggml_metal_graph_compute( [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14]; [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15]; [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16]; + [encoder setBytes:&gqa length:sizeof(gqa) 
atIndex:17]; if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_Q4_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } else if (src0t == GGML_TYPE_Q3_K) { #ifdef GGML_QKK_64 - [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; #else - [encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; #endif } else if (src0t == GGML_TYPE_Q5_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } else if (src0t == GGML_TYPE_Q6_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } else { [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; @@ -910,10 +873,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_GET_ROWS: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - switch (src0->type) { case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break; case GGML_TYPE_Q4_0: [encoder 
setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break; @@ -939,10 +898,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_RMS_NORM: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -962,10 +917,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_NORM: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const float eps = 1e-5f; const int nth = 256; @@ -984,10 +935,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_ALIBI: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - GGML_ASSERT((src0t == GGML_TYPE_F32)); const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past); @@ -1027,10 +974,6 @@ void ggml_metal_graph_compute( } break; case GGML_OP_ROPE: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; @@ -1071,10 +1014,6 @@ void ggml_metal_graph_compute( case GGML_OP_CPY: case GGML_OP_CONT: { - if (encoder == nil) { - encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc]; - } - const int nth = 32; switch (src0t) { diff --git a/ggml-metal.metal b/ggml-metal.metal index 8d26b5ec2..3f3125236 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -18,47 +18,6 @@ typedef struct { uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; -static void dequantize_row_q4_0(device const block_q4_0 * x, device float * y, int k) { - const int qk = QK4_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const half d = x[i].d; - - for (int j = 0; j < qk/2; ++j) { - const int x0 = (x[i].qs[j] & 0x0F) - 8; - const int x1 = (x[i].qs[j] >> 4) - 8; - - y[i*qk + j + 0 ] = x0*d; - 
y[i*qk + j + qk/2] = x1*d; - } - } -} - -static void dequantize_row_q4_1(device const block_q4_1 * x, device float * y, int k) { - const int qk = QK4_1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const half d = x[i].d; - const half m = x[i].m; - - for (int j = 0; j < qk/2; ++j) { - const int x0 = (x[i].qs[j] & 0x0F); - const int x1 = (x[i].qs[j] >> 4); - - y[i*qk + j + 0 ] = x0*d + m; - y[i*qk + j + qk/2] = x1*d + m; - } - } -} - kernel void kernel_add( device const float * src0, device const float * src1, @@ -219,54 +178,6 @@ kernel void kernel_diag_mask_inf( } } -kernel void kernel_get_rows_f16( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - for (int j = 0; j < ne00; j++) { - dst[i*nb1 + j] = ((device half *) ((device char *) src0 + r*nb01))[j]; - } -} - -kernel void kernel_get_rows_q4_0( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q4_0( - (device const block_q4_0 *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - -kernel void kernel_get_rows_q4_1( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q4_1( - (device const block_q4_1 *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - kernel void kernel_norm( device const void * src0, 
device float * dst, @@ -432,14 +343,16 @@ inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thre // N_DST, so this is another explicit assumption of the implementation. template void mul_vec_q_n_f32(device const void * src0, device const float * src1, device float * dst, - int64_t ne00, int64_t ne10, int64_t ne0, int64_t ne01, - uint2 tgpig, uint tiisg, uint sgitg) { + int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint gqa, + uint3 tgpig, uint tiisg, uint sgitg) { const int nb = ne00/QK4_0; const int r0 = tgpig.x; const int r1 = tgpig.y; + const int im = tgpig.z; const int first_row = (r0 * nsg + sgitg) * nr; - device const block_q_type * x = (device const block_q_type *) src0 + first_row * nb; - device const float * y = (device const float *) src1 + r1*ne10; + const uint offset0 = first_row * nb + im/gqa*(nb*ne0); + device const block_q_type * x = (device const block_q_type *) src0 + offset0; + device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; // src1 vector cache float sumf[nr]={0.f}; @@ -470,7 +383,7 @@ void mul_vec_q_n_f32(device const void * src0, device const float * src1, device for (int row = 0; row < nr; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && first_row + row < ne01) { - dst[r1*ne0 + first_row + row] = tot; + dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; } } } @@ -480,13 +393,17 @@ kernel void kernel_mul_mat_q4_0_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, constant int64_t & ne01[[buffer(4)]], - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg); + mul_vec_q_n_f32(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg); } kernel void kernel_mul_mat_q4_1_f32( @@ -494,13 +411,17 @@ kernel void kernel_mul_mat_q4_1_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, constant int64_t & ne01[[buffer(4)]], - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg); + mul_vec_q_n_f32(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg); } kernel void kernel_mul_mat_f16_f32( @@ -869,354 +790,6 @@ static inline uchar4 get_scale_min_k4(int j, device const uint8_t * q) { return r; } -//========================================== dequantization ============================= - -static void dequantize_row_q2_K(device const block_q2_K * x, device float * y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; - - for (int i = 0; i < nb; i++) { - - const float d = x[i].d; - const float min = x[i].dmin; - - device const uint8_t * q = x[i].qs; - -#if QK_K == 256 - int is = 0; - float dl, ml; - for (int n = 0; n < QK_K; n += 128) { - int shift = 0; - for (int j = 0; j < 4; ++j) { - - uint8_t sc = x[i].scales[is++]; - dl = d * (sc & 0xF); ml = min * (sc >> 4); - for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; - - sc = x[i].scales[is++]; - dl = d * (sc & 0xF); ml = min * (sc >> 4); - for (int l = 0; l < 16; 
++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; - - shift += 2; - } - q += 32; - } -#else - float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4); - float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4); - float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4); - float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4); - for (int l = 0; l < 16; ++l) { - y[l+ 0] = dl1 * ((q[l] >> 0) & 3) - ml1; - y[l+16] = dl2 * ((q[l] >> 2) & 3) - ml2; - y[l+32] = dl3 * ((q[l] >> 4) & 3) - ml3; - y[l+48] = dl4 * ((q[l] >> 6) & 3) - ml4; - } - y += QK_K; -#endif - - } -} - -static void dequantize_row_q3_K(device const block_q3_K * x, device float * y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; - -#if QK_K == 256 - - const uint16_t kmask1 = 0x0303; - const uint16_t kmask2 = 0x0f0f; - - uint16_t aux[8]; - thread const int8_t * scales = (thread const int8_t*)aux; - - for (int i = 0; i < nb; i++) { - - const float d_all = (float)(x[i].d); - - device const uint8_t * q = x[i].qs; - device const uint8_t * h = x[i].hmask; - uint8_t m = 1; - - device const uint16_t * a = (device const uint16_t *)x[i].scales; - aux[0] = (a[0] & kmask2) | (((a[4] >> 0) & kmask1) << 4); - aux[1] = (a[1] & kmask2) | (((a[5] >> 0) & kmask1) << 4); - aux[2] = (a[2] & kmask2) | (((a[4] >> 2) & kmask1) << 4); - aux[3] = (a[3] & kmask2) | (((a[5] >> 2) & kmask1) << 4); - aux[4] = ((a[0] >> 4) & kmask2) | (((a[4] >> 4) & kmask1) << 4); - aux[5] = ((a[1] >> 4) & kmask2) | (((a[5] >> 4) & kmask1) << 4); - aux[6] = ((a[2] >> 4) & kmask2) | (((a[4] >> 6) & kmask1) << 4); - aux[7] = ((a[3] >> 4) & kmask2) | (((a[5] >> 6) & kmask1) << 4); - - int is = 0; - float dl; - for (int n = 0; n < QK_K; n += 128) { - int shift = 0; - for (int j = 0; j < 4; ++j) { - - dl = d_all * (scales[is++] - 32); - for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((h[l+ 0] & m) ? 
0 : 4)); - } - - dl = d_all * (scales[is++] - 32); - for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((h[l+16] & m) ? 0 : 4)); - } - - shift += 2; - m <<= 1; - } - q += 32; - } - } -#else - for (int i = 0; i < nb; i++) { - - const float d_all = (float)(x[i].d); - - device const uint8_t * q = x[i].qs; - device const uint8_t * hm = x[i].hmask; - - const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8); - const float d2 = d_all * ((x[i].scales[0] >> 4) - 8); - const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8); - const float d4 = d_all * ((x[i].scales[1] >> 4) - 8); - - for (int l = 0; l < 8; ++l) { - uint8_t h = hm[l]; - y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4)); - y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4)); - y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4)); - y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4)); - y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4)); - y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4)); - y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4)); - y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 
0 : 4)); - } - y += QK_K; - } -#endif - -} - -static void dequantize_row_q4_K(device const block_q4_K * x, device float * y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; - - for (int i = 0; i < nb; i++) { - - device const uint8_t * q = x[i].qs; - -#if QK_K == 256 - const float d = x[i].d; - const float min = x[i].dmin; - - device const uint8_t * scales = x[i].scales; - - int is = 0; - for (int j = 0; j < QK_K; j += 64) { - const uchar4 sc = get_scale_min_k4(is, scales); - const float d1 = d * sc[0]; const float m1 = min * sc[1]; - const float d2 = d * sc[2]; const float m2 = min * sc[3]; - for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1; - for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2; - q += 32; is += 2; - } -#else - device const uint8_t * s = x[i].scales; - device const half2 * dh = (device const half2 *)x[i].d; - const float2 d = (float2)dh[0]; - const float d1 = d[0] * (s[0] & 0xF); - const float d2 = d[0] * (s[1] & 0xF); - const float m1 = d[1] * (s[0] >> 4); - const float m2 = d[1] * (s[1] >> 4); - for (int l = 0; l < 32; ++l) { - y[l+ 0] = d1 * (q[l] & 0xF) - m1; - y[l+32] = d2 * (q[l] >> 4) - m2; - } - y += QK_K; -#endif - - } -} - -static void dequantize_row_q5_K(device const block_q5_K * x, device float * y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; - -#if QK_K == 256 - for (int i = 0; i < nb; i++) { - - const float d = (float)(x[i].d); - const float min = (float)(x[i].dmin); - - device const uint8_t * ql = x[i].qs; - device const uint8_t * qh = x[i].qh; - - int is = 0; - uint8_t u1 = 1, u2 = 2; - for (int j = 0; j < QK_K; j += 64) { - const uchar4 sc = get_scale_min_k4(is, x[i].scales); - const float d1 = d * sc[0]; const float m1 = min * sc[1]; - const float d2 = d * sc[2]; const float m2 = min * sc[3]; - for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1; - for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 
16 : 0)) - m2; - ql += 32; is += 2; - u1 <<= 2; u2 <<= 2; - } - } -#else - for (int i = 0; i < nb; i++) { - - const float d = (float)x[i].d; - - device const uint8_t * ql = x[i].qs; - device const uint8_t * qh = x[i].qh; - device const int8_t * sc = x[i].scales; - - for (int l = 0; l < 8; ++l) { - y[l+ 0] = d * sc[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16)); - y[l+ 8] = d * sc[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16)); - y[l+16] = d * sc[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16)); - y[l+24] = d * sc[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16)); - y[l+32] = d * sc[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16)); - y[l+40] = d * sc[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16)); - y[l+48] = d * sc[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16)); - y[l+56] = d * sc[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16)); - } - y += QK_K; - } -#endif - -} - -static void dequantize_row_q6_K(device const block_q6_K * x, device float * y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; - - for (int i = 0; i < nb; i++) { - - device const uint8_t * ql = x[i].ql; - device const uint8_t * qh = x[i].qh; - device const int8_t * sc = x[i].scales; - - const float d = x[i].d; - -#if QK_K == 256 - for (int n = 0; n < QK_K; n += 128) { - for (int l = 0; l < 32; ++l) { - int is = l/16; - const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - y[l + 0] = d * sc[is + 0] * q1; - y[l + 32] = d * sc[is + 2] * q2; - y[l + 64] = d * sc[is + 4] * q3; - y[l + 96] = d * sc[is + 6] * q4; - } - y += 128; - ql += 64; - qh += 32; - sc += 8; - } -#else - for (int l = 0; l < 16; ++l) { - const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - const int8_t q2 = 
(int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - y[l+ 0] = d * sc[0] * q1; - y[l+16] = d * sc[1] * q2; - y[l+32] = d * sc[2] * q3; - y[l+48] = d * sc[3] * q4; - } - y += 64; -#endif - } -} - -kernel void kernel_get_rows_q2_K( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q2_K( - (device const block_q2_K *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - -kernel void kernel_get_rows_q3_K( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q3_K( - (device const block_q3_K *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - -kernel void kernel_get_rows_q4_K( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q4_K( - (device const block_q4_K *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - -kernel void kernel_get_rows_q5_K( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r 
= ((device int32_t *) src1)[i]; - - dequantize_row_q5_K( - (device const block_q5_K *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - -kernel void kernel_get_rows_q6_K( - device const void * src0, - device const int * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb1, - uint tpig[[thread_position_in_grid]]) { - const int i = tpig; - const int r = ((device int32_t *) src1)[i]; - - dequantize_row_q6_K( - (device const block_q6_K *) ((device char *) src0 + r*nb01), - (device float *) ((device char *) dst + i*nb1), ne00); -} - //====================================== dot products ========================= kernel void kernel_mul_mat_q2_K_f32( @@ -1224,21 +797,27 @@ kernel void kernel_mul_mat_q2_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, constant int64_t & ne01[[buffer(4)]], - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; + const int r2 = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int ib_row = first_row * nb; - device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row; - device const float * y = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0; + device const float * y = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float yl[32]; float sumf[N_DST]={0.f}, all_sum; @@ -1351,7 
+930,7 @@ kernel void kernel_mul_mat_q2_K_f32( for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { - dst[r1*ne0 + first_row + row] = all_sum; + dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = all_sum; } } } @@ -1362,10 +941,14 @@ kernel void kernel_mul_mat_q3_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, - constant int64_t & ne1, - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne01[[buffer(4)]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1373,11 +956,12 @@ kernel void kernel_mul_mat_q3_K_f32( const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; + const int64_t r2 = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; - - device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb; - device const float * yy = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0; + device const float * yy = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float yl[16]; @@ -1465,7 +1049,7 @@ kernel void kernel_mul_mat_q3_K_f32( const float sumf = (sumf1[row] - 32.f*sumf2[row]) / (1 << shift); const float tot = simd_sum(sumf); if (tiisg == 0) { - dst[r1*ne0 + first_row + row] = tot; + dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = tot; } } } @@ -1475,10 +1059,14 @@ kernel void kernel_mul_mat_q3_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, - constant int64_t & ne1, - uint2 
tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne01[[buffer(4)]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1486,11 +1074,12 @@ kernel void kernel_mul_mat_q3_K_f32( const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; + const int64_t r2 = tgpig.z; const int row = 2 * r0 + sgitg; - - device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb; - device const float * yy = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0; + device const float * yy = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; const int ix = tiisg/4; const int il = 4 * (tiisg%4);// 0, 4, 8, 12 const int im = il/8; // 0, 0, 1, 1 @@ -1529,7 +1118,7 @@ kernel void kernel_mul_mat_q3_K_f32( const float tot = simd_sum(sumf); if (tiisg == 0) { - dst[r1*ne0 + row] = tot; + dst[r1*ne0 + r2*ne0*ne1 + row] = tot; } } @@ -1541,10 +1130,14 @@ kernel void kernel_mul_mat_q4_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, constant int64_t & ne01[[buffer(4)]], - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1560,10 +1153,12 @@ kernel void kernel_mul_mat_q4_K_f32( const int nb = ne00/QK_K; 
const int r0 = tgpig.x; const int r1 = tgpig.y; + const int r2 = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int ib_row = first_row * nb; - device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row; - device const float * y = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; + device const float * y = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float yl[16]; float yh[16]; float sumf[N_DST]={0.f}, all_sum; @@ -1630,7 +1225,7 @@ kernel void kernel_mul_mat_q4_K_f32( for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { - dst[r1*ne0 + first_row + row] = all_sum; + dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = all_sum; } } } @@ -1640,10 +1235,14 @@ kernel void kernel_mul_mat_q4_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, constant int64_t & ne01[[buffer(4)]], - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1653,10 +1252,12 @@ kernel void kernel_mul_mat_q4_K_f32( const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; + const int r2 = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int ib_row = first_row * nb; - device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row; - device const float * y = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; + 
device const float * y = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float yl[8]; float yh[8]; float sumf[N_DST]={0.f}, all_sum; @@ -1712,7 +1313,7 @@ kernel void kernel_mul_mat_q4_K_f32( for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { - dst[r1*ne0 + first_row + row] = all_sum; + dst[r1*ne0+ r2*ne0*ne1 + first_row + row] = all_sum; } } } @@ -1723,9 +1324,14 @@ kernel void kernel_mul_mat_q5_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne10, - constant int64_t & ne0, - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne01[[buffer(4)]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1733,11 +1339,12 @@ kernel void kernel_mul_mat_q5_K_f32( const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; + const int r2 = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; - - device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb; - device const float * yy = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb + offset0; + device const float * yy = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float sumf[2]={0.f}; @@ -1871,7 +1478,7 @@ kernel void kernel_mul_mat_q5_K_f32( for (int row = 0; row < 2; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0) { - dst[r1*ne0 + first_row + row] = tot; + dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = tot; } } @@ -1882,9 +1489,14 @@ kernel void kernel_mul_mat_q6_K_f32( device const float * src1, device float * dst, constant int64_t & ne00, - 
constant int64_t & ne10, - constant int64_t & ne0, - uint2 tgpig[[threadgroup_position_in_grid]], + constant int64_t & ne01[[buffer(4)]], + constant int64_t & ne02[[buffer(5)]], + constant int64_t & ne10[[buffer(9)]], + constant int64_t & ne12[[buffer(11)]], + constant int64_t & ne0[[buffer(15)]], + constant int64_t & ne1[[buffer(16)]], + constant uint & gqa[[buffer(17)]], + uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { @@ -1897,11 +1509,12 @@ kernel void kernel_mul_mat_q6_K_f32( const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; + const int r2 = tgpig.z; const int row = 2 * r0 + sgitg; - - device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb; //r0*nb; - device const float * yy = (device const float *) src1 + r1*ne10; + const uint offset0 = r2/gqa*(nb*ne0); + device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb + offset0; + device const float * yy = (device const float *) src1 + r1*ne10 + r2*ne00*ne1; float sumf = 0; @@ -1967,6 +1580,366 @@ kernel void kernel_mul_mat_q6_K_f32( const float tot = simd_sum(sumf); if (tiisg == 0) { - dst[r1*ne0 + row] = tot; + dst[r1*ne0 + r2*ne0*ne1 + row] = tot; } } + +//============================= templates and their specializations ============================= + +template +void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) { + half4x4 temp = *(((device half4x4 *)src)); + for (int i = 0; i < 16; i++){ + reg[i/4][i%4] = temp[i/4][i%4]; + } +} + +template +void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 1); + const half d = il ? (xb->d / 16.h) : xb->d; + const half m = il ? (-8.h * 16.h) : -8.h; + const ushort mask0 = il ? 0x00F0 : 0x000F; + const ushort mask1 = il ? 
0xF000 : 0x0F00; + + for (int i=0;i<8;i++) { + reg[i/2][2*(i%2)] = (((qs[i] & mask0)) + m) * d; + reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) + m) * d; + } +} + +template +void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 2); + const half d = il ? (xb->d / 16.h) : xb->d; + const half m = xb->m; + const ushort mask0 = il ? 0x00F0 : 0x000F; + const ushort mask1 = il ? 0xF000 : 0x0F00; + + for (int i=0;i<8;i++) { + reg[i/2][2*(i%2)] = (((qs[i] & mask0)) * d) + m; + reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) * d) + m; + } +} + +template +void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { + const half d = xb->d; + const half min = xb->dmin; + device const uint8_t * q = (device const uint8_t *)xb->qs; + half dl, ml; + uint8_t sc = xb->scales[il]; + +#if QK_K == 256 + q = q + 32*(il/8) + 16*(il&1); + il = (il/2)%4; +#endif + half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); + uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); + dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4); + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = dl * (q[i] & mask) - ml; + } +} + +template +void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) { + const float d_all = (float)(xb->d); + device const uint8_t * q = (device const uint8_t *)xb->qs; + device const uint8_t * h = (device const uint8_t *)xb->hmask; + device const int8_t * scales = (device const int8_t *)xb->scales; + +#if QK_K == 256 + q = q + 32 * (il/8) + 16 * (il&1); + h = h + 16 * (il&1); + uint8_t m = 1 << (il/2); + uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \ + ((il/4)>0 ? 12 : 3); + uint16_t kmask2 = il/8 ? 0xF0 : 0x0F; + uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4]; + int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2) : \ + (scale_2&kmask2) | ((scale_1&kmask1) << 4); + float dl = il<8 ? 
d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f); + + il = (il/2)%4; + float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); + uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); + + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i] & m) ? 0 : 4.f/coef)); + } +#else + float kcoef = il&1 ? 1.f/16.f : 1.f; + uint16_t kmask = il&1 ? 0xF0 : 0x0F; + float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8); + float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); + uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); + uint8_t m = 1<<(il*2); + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef)); + } +#endif +} + +template +void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) { + device const uint8_t * q = xb->qs; + +#if QK_K == 256 + const float d = (float)(xb->d); + const float min = (float)(xb->dmin); + short is = (il/4) * 2; + q = q + (il/4) * 32 + 16 * (il&1); + il = il%4; + const uchar4 sc = get_scale_min_k4(is, xb->scales); + const float dl = il<2 ? d * sc[0] : d * sc[2]/16.h; + const float ml = il<2 ? min * sc[1] : min * sc[3]; +#else + q = q + 16 * (il&1); + device const uint8_t * s = xb->scales; + device const half2 * dh = (device const half2 *)xb->d; + const float2 d = (float2)dh[0]; + const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h; + const float ml = il<2 ? d[1] * (s[0]>>4) : d[1 ]* (s[1]>>4); +#endif + const ushort mask = il<2 ? 
0x0F : 0xF0; + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = dl * (q[i] & mask) - ml; + } +} + +template +void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) { + device const uint8_t * q = xb->qs; + device const uint8_t * qh = xb->qh; + +#if QK_K == 256 + const float d = (float)(xb->d); + const float min = (float)(xb->dmin); + short is = (il/4) * 2; + q = q + 32 * (il/4) + 16 * (il&1); + qh = qh + 16 * (il&1); + uint8_t ul = 1 << (il/2); + il = il%4; + const uchar4 sc = get_scale_min_k4(is, xb->scales); + const float dl = il<2 ? d * sc[0] : d * sc[2]/16.h; + const float ml = il<2 ? min * sc[1] : min * sc[3]; + + const ushort mask = il<2 ? 0x0F : 0xF0; + const float qh_val = il<2 ? 16.f : 256.f; + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml; + } +#else + q = q + 16 * (il&1); + device const int8_t * s = xb->scales; + const float dl = xb->d * s[il]; + uint8_t m = 1<<(il*2); + const float coef = il<2 ? 1.f : 1.f/16.f; + const ushort mask = il<2 ? 0x0F : 0xF0; + for (int i = 0; i < 16; ++i) { + reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef)); + } +#endif +} + +template +void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) { + const float d_all = (float)(xb->d); + device const uint8_t * ql = (device const uint8_t *)xb->ql; + device const uint8_t * qh = (device const uint8_t *)xb->qh; + device const int8_t * scales = (device const int8_t *)xb->scales; + +#if QK_K == 256 + ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1); + qh = qh + 32*(il/8) + 16*(il&1); + float sc = scales[(il%2) + 2 * ((il/2))]; + il = (il/2)%4; +#else + ql = ql + 16 * (il&1); + float sc = scales[il]; +#endif + for (int i = 0; i < 16; ++i) { + uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); + uint16_t kmask2 = il>1 ? 0xF0 : 0x0F; + const float coef = il>1 ? 1.f/16.f : 1.f; + float q = il&1 ? 
((ql[i]&kmask2)|((qh[i]&kmask1)<<2)) - 32.f/coef : \ + ((ql[i]&kmask2)|((qh[i]&kmask1)<<4)) - 32.f/coef; + reg[i/4][i%4] = d_all * sc * q * coef; + } +} + +template +kernel void kernel_get_rows( + device const void * src0, + device const int * src1, + device float * dst, + constant int64_t & ne00, + constant uint64_t & nb01, + constant uint64_t & nb1, + uint tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint tptg[[threads_per_threadgroup]]) { + const int i = tgpig; + const int r = ((device int32_t *) src1)[i]; + + for (int ind = tiitg; ind < ne00/16; ind += tptg) { + float4x4 temp; + dequantize_func( + ((device const block_q *) ((device char *) src0 + r*nb01)) + ind/nl, ind%nl, temp); + *(((device float4x4 *) ((device char *) dst + i*nb1)) + ind) = temp; + } +} + +#define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A +#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix A +#define BLOCK_SIZE_K 32 +#define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A +#define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B +#define THREAD_PER_BLOCK 128 +#define THREAD_PER_ROW 2 // 2 thread for each row in matrix A to load numbers +#define THREAD_PER_COL 4 // 4 thread for each row in matrix B to load numbers +#define SG_MAT_SIZE 64 // simdgroup matrix is of shape 8x8 +#define SG_MAT_ROW 8 + +// each block_q contains 16*nl weights +template +kernel void kernel_mul_mm(device const uchar * src0, + device const float * src1, + device float * dst, + constant int64_t & ne00, + constant int64_t & ne02, + constant int64_t & nb01, + constant int64_t & nb02, + constant int64_t & ne12, + constant int64_t & ne0, + constant int64_t & ne1, + constant uint & gqa, + threadgroup uchar * shared_memory [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint sgitg[[simdgroup_index_in_threadgroup]]) { + + threadgroup half * sa = ((threadgroup 
half *)shared_memory); + threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); + + const uint r0 = tgpig.y; + const uint r1 = tgpig.x; + const uint im = tgpig.z; + // if this block is of 64x32 shape or smaller + short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; + short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; + // a thread shouldn't load data outside of the matrix + short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; + short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1; + + simdgroup_half8x8 ma[4]; + simdgroup_float8x8 mb[2]; + simdgroup_float8x8 c_res[8]; + for (int i = 0; i < 8; i++){ + c_res[i] = make_filled_simdgroup_matrix(0.f); + } + + short il = (tiitg % THREAD_PER_ROW); + uint offset0 = im/gqa*nb02; ushort offset1 = il/nl; + device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; + device const float * y = src1 + (r1 * BLOCK_SIZE_N + thread_col) * ne00 \ + + BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL) + im * ne00 * ne1; + + for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { + //load data and store to threadgroup memory + half4x4 temp_a; + dequantize_func(x, il, temp_a); + #pragma unroll(16) + for (int i = 0; i < 16; i++) { + *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ + + 16 * (tiitg % THREAD_PER_ROW) + 8 * (i / 8)) \ + + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; + } + *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) \ + = *((device float2x4 *)y); + il = (il + 2 < nl) ? il + 2 : il % 2; + x = (il < 2) ? 
x + (2+nl-1)/nl : x; + y += BLOCK_SIZE_K; + + threadgroup_barrier(mem_flags::mem_threadgroup); + //load matrices from threadgroup memory and conduct outer products + threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); + threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); + #pragma unroll(4) + for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { + #pragma unroll(4) + for (int i = 0; i < 4; i++) { + simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); + } + simdgroup_barrier(mem_flags::mem_none); + #pragma unroll(2) + for (int i = 0; i < 2; i++) { + simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); + } + + lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; + lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; + #pragma unroll(8) + for (int i = 0; i < 8; i++){ + simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); + } + } + } + + if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) { + device float *C = dst + BLOCK_SIZE_M * r0 + 32 * (sgitg&1) \ + + (BLOCK_SIZE_N * r1 + 16 * (sgitg>>1)) * ne0 + im*ne1*ne0; + for (int i = 0; i < 8; i++) { + simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0); + } + } else { + // block is smaller than 64x32, we should avoid writing data outside of the matrix + threadgroup float *temp_str = ((threadgroup float *)shared_memory) \ + + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; + for (int i = 0; i < 8; i++) { + simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + device float *C = dst + BLOCK_SIZE_M * r0 + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0; + if (sgitg==0) { + for (int i = 0; i < n_rows; i++) { + for (int j = tiitg; j< n_cols; j += BLOCK_SIZE_N) { + *(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); + } + } + } + } +} + +#if QK_K == 256 +#define QK_NL 16 +#else +#define QK_NL 4 +#endif + +typedef void (get_rows_t)(device const void *, device const int *, 
device float *, constant int64_t &, \ + constant uint64_t &, constant uint64_t &, uint, uint, uint); + +template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows; +template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows; + +typedef void (mat_mm_t)(device const uchar *, device const float *, device float *, constant int64_t &,\ + constant int64_t &, constant int64_t &, constant int64_t &, constant int64_t &, \ + constant int64_t &, constant int64_t &, constant uint &, threadgroup uchar *, uint3, uint, uint); + +template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm; diff --git a/llama.cpp b/llama.cpp index c8ab313d9..a161f1566 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1845,7 +1845,7 @@ static bool llama_eval_internal( #endif #ifdef GGML_USE_METAL - if (lctx.ctx_metal && N == 1) { + if (lctx.ctx_metal) { // TODO: disabled until #2413 is resolved //if 
(!ggml_metal_if_optimized(lctx.ctx_metal)) { // ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf); @@ -1857,22 +1857,6 @@ static bool llama_eval_internal( ggml_metal_get_tensor(lctx.ctx_metal, embeddings); } } else { - // IMPORTANT: - // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla - // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX - // coprocessor. - // - // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch. - // But for now, we have focused only on Matrix x Vector Metal multiplication. - // - // TODO: avoid these syncs via shared memory (ref #1696) - // - if (lctx.ctx_metal) { - // We need to sync the GPU KV cache with the CPU KV cache - ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k); - ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v); - } - ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads); } #else From fc8ef549e50087762a0b4f901cd74b2defcc6ae3 Mon Sep 17 00:00:00 2001 From: Shouzheng Liu Date: Wed, 16 Aug 2023 16:08:28 -0400 Subject: [PATCH 04/71] metal : enable ggml-alloc (#2627) * metal: enable ggml-alloc Make ggml-alloc work with concurrently dispatch. 
* style-fix Co-authored-by: slaren --------- Co-authored-by: slaren Co-authored-by: Georgi Gerganov --- ggml-alloc.c | 25 ++++++++++++++++++++++++- ggml-alloc.h | 4 ++++ ggml-metal.h | 9 ++++++--- ggml-metal.m | 15 ++++++++------- llama.cpp | 34 +++++++++++++++++++--------------- 5 files changed, 61 insertions(+), 26 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index 4121f3dba..8de28cf9d 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -67,6 +67,8 @@ struct ggml_allocr { struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE]; size_t max_size; bool measure; + int parse_seq[GGML_MAX_NODES]; + bool has_parse_seq; #ifdef GGML_ALLOCATOR_DEBUG struct ggml_tensor * allocated_tensors[1024]; @@ -229,6 +231,17 @@ static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_t alloc->n_free_blocks++; } +void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n) { + int pos = 0; + for (int i = 0; i < n; i++) { + if (list[i] != -1) { + alloc->parse_seq[pos] = list[i]; + pos++; + } + } + alloc->has_parse_seq = true; +} + void ggml_allocr_reset(struct ggml_allocr * alloc) { alloc->n_free_blocks = 1; size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment); @@ -248,6 +261,8 @@ struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) /*.hash_table = */ {{0}}, /*.max_size = */ 0, /*.measure = */ false, + /*.parse_seq = */ {0}, + /*.has_parse_seq = */ false, #ifdef GGML_ALLOCATOR_DEBUG /*.allocated_tensors = */ = {0}, #endif @@ -275,6 +290,8 @@ struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) { /*.hash_table = */ {{0}}, /*.max_size = */ 0, /*.measure = */ true, + /*.parse_seq = */ {0}, + /*.has_parse_seq = */ false, #ifdef GGML_ALLOCATOR_DEBUG /*.allocated_tensors = */ = {0}, #endif @@ -473,7 +490,13 @@ static size_t ggml_allocator_alloc_graph_tensors_n( allocate_node(alloc, input); } } - for (int i = 0; i < gf->n_nodes; i++) { + for (int ind = 0; ind < gf->n_nodes; ind++) { + int i; + if 
(alloc->has_parse_seq) { + i = alloc->parse_seq[ind]; + } else { + i = ind; + } struct ggml_tensor * node = gf->nodes[i]; // allocate parents (leafs) diff --git a/ggml-alloc.h b/ggml-alloc.h index a5ec8f87a..14a4350ac 100644 --- a/ggml-alloc.h +++ b/ggml-alloc.h @@ -10,6 +10,10 @@ extern "C" { GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment); GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment); +// tell the allocator to parse nodes following the order described in the list +// you should call this if your graph are optimized to execute out-of-order +GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n); + GGML_API void ggml_allocr_free(struct ggml_allocr * alloc); GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc); GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc); diff --git a/ggml-metal.h b/ggml-metal.h index 16f1a0caa..bf3f9a6a8 100644 --- a/ggml-metal.h +++ b/ggml-metal.h @@ -63,10 +63,13 @@ void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * // try to find operations that can be run concurrently in the graph // you should run it again if the topology of your graph changes -void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf); +void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf, bool check_mem); -// if the graph has been optimized for concurrently dispatch -bool ggml_metal_if_optimized(struct ggml_metal_context * ctx); +// if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized +int ggml_metal_if_optimized(struct ggml_metal_context * ctx); + +// output the concur_list for ggml_alloc +int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx); // same as ggml_graph_compute but uses Metal // creates gf->n_threads command buffers in parallel diff --git a/ggml-metal.m 
b/ggml-metal.m index e13cb4b3c..32c6e4869 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -236,11 +236,12 @@ void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) { ctx->n_cb = n_cb; } -bool ggml_metal_if_optimized(struct ggml_metal_context * ctx) { - if (ctx->concur_list_len) { - return true; - } - return false; +int ggml_metal_if_optimized(struct ggml_metal_context * ctx) { + return ctx->concur_list_len; +} + +int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) { + return ctx->concur_list; } // finds the Metal buffer that contains the tensor data on the GPU device @@ -383,7 +384,7 @@ void ggml_metal_get_tensor( void ggml_metal_graph_find_concurrency( struct ggml_metal_context * ctx, - struct ggml_cgraph * gf) { + struct ggml_cgraph * gf, bool check_mem) { int search_depth = gf->n_nodes; //we only find concurrency in this range to avoid wasting too much time int nodes_unused[GGML_MAX_CONCUR]; @@ -430,7 +431,7 @@ void ggml_metal_graph_find_concurrency( } } } - if (exe_flag) { + if (exe_flag && check_mem) { // check if nodes[i]'s data will be overwritten by a node before nodes[i]. // if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3] int64_t data_start = (int64_t) gf->nodes[i]->data; diff --git a/llama.cpp b/llama.cpp index a161f1566..345243990 100644 --- a/llama.cpp +++ b/llama.cpp @@ -63,7 +63,7 @@ static void llama_log_callback_default(llama_log_level level, const char * text, #define LLAMA_LOG_ERROR(...) 
llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) -#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) +#if !defined(GGML_USE_CUBLAS) #include "ggml-alloc.h" #define LLAMA_USE_ALLOCATOR #else @@ -1846,10 +1846,6 @@ static bool llama_eval_internal( #ifdef GGML_USE_METAL if (lctx.ctx_metal) { - // TODO: disabled until #2413 is resolved - //if (!ggml_metal_if_optimized(lctx.ctx_metal)) { - // ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf); - //} ggml_metal_set_n_cb (lctx.ctx_metal, n_threads); ggml_metal_graph_compute(lctx.ctx_metal, gf); ggml_metal_get_tensor (lctx.ctx_metal, res); @@ -3287,7 +3283,18 @@ struct llama_context * llama_new_context_with_model( int n_past = hparams.n_ctx - n_tokens; llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); - +#ifdef GGML_USE_METAL + if (params.n_gpu_layers > 0) { + ctx->ctx_metal = ggml_metal_init(1); + if (!ctx->ctx_metal) { + LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); + llama_free(ctx); + return NULL; + } + ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false); + ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); + } +#endif // measure memory requirements for the graph size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; @@ -3305,6 +3312,11 @@ struct llama_context * llama_new_context_with_model( ctx->buf_alloc.resize(alloc_size); ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment); +#ifdef GGML_USE_METAL + if (ctx->ctx_metal) { + ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); + } +#endif } #else ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); @@ -3319,13 +3331,6 @@ 
struct llama_context * llama_new_context_with_model( #ifdef GGML_USE_METAL if (params.n_gpu_layers > 0) { // this allocates all Metal resources and memory buffers - ctx->ctx_metal = ggml_metal_init(1); - - if (!ctx->ctx_metal) { - LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); - llama_free(ctx); - return NULL; - } void * data_ptr = NULL; size_t data_size = 0; @@ -3354,8 +3359,7 @@ struct llama_context * llama_new_context_with_model( LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0)); LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.addr, ctx->buf_alloc.size, 0)); #undef LLAMA_METAL_CHECK_BUF } #endif From ed53db86c3b0e0815331a96d7a379edb5e62472c Mon Sep 17 00:00:00 2001 From: Jhen-Jie Hong Date: Thu, 17 Aug 2023 04:09:03 +0800 Subject: [PATCH 05/71] metal : print error of load pipeline state (#2564) * metal : print error of load pipeline state * metal : return null if load pipeline failed --- ggml-metal.m | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index 32c6e4869..d23fff1dd 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -163,10 +163,15 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { // load kernels { + NSError * error = nil; #define GGML_METAL_ADD_KERNEL(name) \ ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ - ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:nil]; \ - fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, 
(void *) ctx->pipeline_##name); + ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ + fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name); \ + if (error) { \ + fprintf(stderr, "%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ + return NULL; \ + } GGML_METAL_ADD_KERNEL(add); GGML_METAL_ADD_KERNEL(add_row); From 0919a0f73d95cfb93a1646a1d1741a0615fe2c5e Mon Sep 17 00:00:00 2001 From: Kolen Cheung Date: Wed, 16 Aug 2023 21:09:49 +0100 Subject: [PATCH 06/71] cmake : install ggml-meta.metal if LLAMA_METAL (#2449) --- CMakeLists.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 01b40c2e8..824d9f2cf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -569,6 +569,16 @@ install( WORLD_READ WORLD_EXECUTE DESTINATION ${CMAKE_INSTALL_BINDIR}) +if (LLAMA_METAL) + install( + FILES ggml-metal.metal + PERMISSIONS + OWNER_READ + OWNER_WRITE + GROUP_READ + WORLD_READ + DESTINATION ${CMAKE_INSTALL_BINDIR}) +endif() # # programs, examples and tests From 42f8fe19272554c2aafe1be5ab2366d0e136ce3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Thu, 17 Aug 2023 08:56:42 +0300 Subject: [PATCH 07/71] examples/gguf : no need to keep q option for quantization any more --- examples/gguf/gguf.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index d742dce17..dee00df87 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -233,16 +233,13 @@ int main(int argc, char ** argv) { const std::string fname(argv[1]); const std::string mode (argv[2]); - GGML_ASSERT((mode == "r" || mode == "w" || mode == "q") && "mode must be r, w or q"); + GGML_ASSERT((mode == "r" || mode == "w") && "mode must be r or w"); if (mode == "w") { GGML_ASSERT(gguf_ex_write(fname) && "failed to write gguf file"); } else if 
(mode == "r") { GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file"); GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file"); - } else if (mode == "q") { - llama_model_quantize_params params = llama_model_quantize_default_params(); - llama_model_quantize(fname.c_str(), "quant.gguf", &params); } return 0; From a872a2b28eaefc8d464eaa535c94deeb501666f9 Mon Sep 17 00:00:00 2001 From: Shouzheng Liu Date: Thu, 17 Aug 2023 03:35:53 -0400 Subject: [PATCH 08/71] ggml-alloc : fix discrepancy between measure&eval (#2639) The GGML memory allocator consistently places a tensor within the optimal-fit memory block, which is the smallest block capable of accommodating the tensor's size. During the measurement phase, the final block is generously sized, ensuring it never qualifies as the optimal-fit block as long as there exists another block capable of accommodating the tensor. Nevertheless, in the evaluation phase, the last block is constrained in size and could potentially qualify as the optimal-fit block. Consequently, there exists the possibility of a tensor being allocated to a different region during evaluation, leading to more memory fragmentation in our scratch buffer. This recent commit guarantees uniform behavior of the allocator across both the measurement and evaluation phases, eliminating discrepancies between the two. 
--- ggml-alloc.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index 8de28cf9d..3ee98d03d 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -113,10 +113,10 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) size_t max_avail = 0; - // find the best fitting free block + // find the best fitting free block besides the last block int best_fit_block = -1; size_t best_fit_size = SIZE_MAX; - for (int i = 0; i < alloc->n_free_blocks; i++) { + for (int i = 0; i < alloc->n_free_blocks - 1; i++) { struct free_block * block = &alloc->free_blocks[i]; max_avail = MAX(max_avail, block->size); if (block->size >= size && block->size <= best_fit_size) { @@ -128,10 +128,17 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) AT_PRINTF("block %d\n", best_fit_block); if (best_fit_block == -1) { - fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n", - __func__, size, max_avail); - GGML_ASSERT(!"not enough space in the buffer"); + // the last block is our last resort + struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1]; + if (block->size >= size) { + best_fit_block = alloc->n_free_blocks - 1; + max_avail = MAX(max_avail, block->size); + } else { + fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n", + __func__, size, max_avail); + GGML_ASSERT(!"not enough space in the buffer"); return; + } } struct free_block * block = &alloc->free_blocks[best_fit_block]; void * addr = block->addr; From 7cf54e1f746941279d81d485796777c01f88049c Mon Sep 17 00:00:00 2001 From: drbh Date: Thu, 17 Aug 2023 03:41:01 -0400 Subject: [PATCH 09/71] tests : adds simple llama grammar tests (#2618) * adds simple llama grammar tests * fix lint and add Makefile * 0 terminate code_points * avoid dangling pointers in candidate cleanup * cleanup grammar at end of test --- Makefile | 5 
+- tests/CMakeLists.txt | 1 + tests/test-llama-grammar.cpp | 403 +++++++++++++++++++++++++++++++++++ 3 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 tests/test-llama-grammar.cpp diff --git a/Makefile b/Makefile index 5b801d16f..376a091dc 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test # Binaries only useful for tests -TEST_TARGETS = tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0 +TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0 default: $(BUILD_TARGETS) @@ -412,6 +412,9 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) +tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS) + $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS) + tests/test-grammar-parser: tests/test-grammar-parser.cpp examples/grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 689fb6f2a..276f39b3b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -12,5 +12,6 @@ llama_add_test(test-quantize-perf.cpp) llama_add_test(test-sampling.cpp) llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin) llama_add_test(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp) +llama_add_test(test-llama-grammar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp 
${CMAKE_CURRENT_SOURCE_DIR}/../llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/common.cpp) llama_add_test(test-grad0.cpp) # SLOW # llama_add_test(test-opt.cpp) # SLOW diff --git a/tests/test-llama-grammar.cpp b/tests/test-llama-grammar.cpp new file mode 100644 index 000000000..f98c6531f --- /dev/null +++ b/tests/test-llama-grammar.cpp @@ -0,0 +1,403 @@ +#ifdef NDEBUG +#undef NDEBUG +#endif + +#include "llama.cpp" +#include "examples/common.cpp" +#include "examples/grammar-parser.cpp" +#include + +int main() +{ + grammar_parser::parse_state parsed_grammar; + + std::vector> expected = { + {"expr", 2}, + {"expr_6", 6}, + {"expr_7", 7}, + {"ident", 8}, + {"ident_10", 10}, + {"num", 9}, + {"num_11", 11}, + {"root", 0}, + {"root_1", 1}, + {"root_5", 5}, + {"term", 4}, + {"ws", 3}, + {"ws_12", 12}, + }; + + std::vector> expected_rules = { + {{LLAMA_GRETYPE_RULE_REF, 5}, {LLAMA_GRETYPE_END, 0}}, + { + {LLAMA_GRETYPE_RULE_REF, 2}, + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_RULE_REF, 4}, + {LLAMA_GRETYPE_CHAR, 10}, + {LLAMA_GRETYPE_END, 0}, + }, + {{LLAMA_GRETYPE_RULE_REF, 4}, {LLAMA_GRETYPE_RULE_REF, 7}, {LLAMA_GRETYPE_END, 0}}, + {{LLAMA_GRETYPE_RULE_REF, 12}, {LLAMA_GRETYPE_END, 0}}, + { + {LLAMA_GRETYPE_RULE_REF, 8}, + {LLAMA_GRETYPE_ALT, 0}, + {LLAMA_GRETYPE_RULE_REF, 9}, + {LLAMA_GRETYPE_ALT, 0}, + {LLAMA_GRETYPE_CHAR, 40}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_RULE_REF, 2}, + {LLAMA_GRETYPE_CHAR, 41}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_END, 0}, + }, + {{LLAMA_GRETYPE_RULE_REF, 1}, {LLAMA_GRETYPE_RULE_REF, 5}, {LLAMA_GRETYPE_ALT, 0}, {LLAMA_GRETYPE_RULE_REF, 1}, {LLAMA_GRETYPE_END, 0}}, + { + {LLAMA_GRETYPE_CHAR, 45}, + {LLAMA_GRETYPE_CHAR_ALT, 43}, + {LLAMA_GRETYPE_CHAR_ALT, 42}, + {LLAMA_GRETYPE_CHAR_ALT, 47}, + {LLAMA_GRETYPE_RULE_REF, 4}, + {LLAMA_GRETYPE_END, 0}, + }, + {{LLAMA_GRETYPE_RULE_REF, 6}, {LLAMA_GRETYPE_RULE_REF, 7}, {LLAMA_GRETYPE_ALT, 0}, {LLAMA_GRETYPE_END, 0}}, + { + {LLAMA_GRETYPE_CHAR, 97}, 
+ {LLAMA_GRETYPE_CHAR_RNG_UPPER, 122}, + {LLAMA_GRETYPE_RULE_REF, 10}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_END, 0}, + }, + {{LLAMA_GRETYPE_RULE_REF, 11}, {LLAMA_GRETYPE_RULE_REF, 3}, {LLAMA_GRETYPE_END, 0}}, + { + {LLAMA_GRETYPE_CHAR, 97}, + {LLAMA_GRETYPE_CHAR_RNG_UPPER, 122}, + {LLAMA_GRETYPE_CHAR_ALT, 48}, + {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57}, + {LLAMA_GRETYPE_CHAR_ALT, 95}, + {LLAMA_GRETYPE_RULE_REF, 10}, + {LLAMA_GRETYPE_ALT, 0}, + {LLAMA_GRETYPE_END, 0}, + }, + { + {LLAMA_GRETYPE_CHAR, 48}, + {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57}, + {LLAMA_GRETYPE_RULE_REF, 11}, + {LLAMA_GRETYPE_ALT, 0}, + {LLAMA_GRETYPE_CHAR, 48}, + {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57}, + {LLAMA_GRETYPE_END, 0}, + }, + { + {LLAMA_GRETYPE_CHAR, 32}, + {LLAMA_GRETYPE_CHAR_ALT, 9}, + {LLAMA_GRETYPE_CHAR_ALT, 10}, + {LLAMA_GRETYPE_RULE_REF, 12}, + {LLAMA_GRETYPE_ALT, 0}, + {LLAMA_GRETYPE_END, 0}, + }, + }; + + for (auto pair : expected) + { + parsed_grammar.symbol_ids[pair.first] = pair.second; + } + + for (auto rule : expected_rules) + { + parsed_grammar.rules.push_back({}); + for (auto element : rule) + { + parsed_grammar.rules.back().push_back(element); + } + } + + llama_grammar *grammar = NULL; + std::vector grammar_rules(parsed_grammar.c_rules()); + grammar = llama_grammar_init( + grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root")); + + std::vector> expected_stacks = { + { + {LLAMA_GRETYPE_RULE_REF, 5}, + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_CHAR, 97}, + }, + { + {LLAMA_GRETYPE_RULE_REF, 5}, + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_CHAR, 48}, + }, + { + {LLAMA_GRETYPE_RULE_REF, 5}, + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_CHAR, 48}, + }, + { + {LLAMA_GRETYPE_RULE_REF, 5}, + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_CHAR, 40}, + }, + { + {LLAMA_GRETYPE_CHAR, 
61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_CHAR, 97}, + }, + { + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_CHAR, 48}, + }, + { + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_RULE_REF, 3}, + {LLAMA_GRETYPE_CHAR, 48}, + }, + { + {LLAMA_GRETYPE_CHAR, 61}, + {LLAMA_GRETYPE_RULE_REF, 7}, + {LLAMA_GRETYPE_CHAR, 40}, + }}; + + auto index = 0; + for (auto stack : grammar->stacks) + { + // compare stack to expected_stack + for (uint32_t i = 0; i < stack.size(); i++) + { + auto element = stack[i]; + auto expected_element = expected_stacks[index][i]; + + // pretty print error message before asserting + if (expected_element.type != element->type || expected_element.value != element->value) + { + fprintf(stderr, "index: %d\n", index); + fprintf(stderr, "expected_element: %d, %d\n", expected_element.type, expected_element.value); + fprintf(stderr, "actual_element: %d, %d\n", element->type, element->value); + fprintf(stderr, "expected_element != actual_element\n"); + } + + assert(expected_element.type == element->type && expected_element.value == element->value); + } + index++; + } + + std::vector> next_stacks; + std::vector next_candidates; + next_candidates.resize(24); + + for (size_t i = 0; i < 24; ++i) + { + uint32_t *cp = new uint32_t[2]; // dynamically allocate memory for code_point + cp[0] = 37 + i; + cp[1] = 0; + next_candidates[i] = {i, cp}; + } + + std::vector>> expected_reject = { + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {11, 48}, + {12, 49}, + {13, 50}, + {14, 51}, + {15, 52}, + {16, 53}, + {17, 54}, + {18, 55}, + {19, 56}, + {20, 57}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 
40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {11, 48}, + {12, 49}, + {13, 50}, + {14, 51}, + {15, 52}, + {16, 53}, + {17, 54}, + {18, 55}, + {19, 56}, + {20, 57}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {11, 48}, + {12, 49}, + {13, 50}, + {14, 51}, + {15, 52}, + {16, 53}, + {17, 54}, + {18, 55}, + {19, 56}, + {20, 57}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {3, 40}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + { + {0, 37}, + {1, 38}, + {2, 39}, + {4, 41}, + {5, 42}, + {6, 43}, + {7, 44}, + {8, 45}, + {9, 46}, + {10, 47}, + {11, 48}, + {12, 49}, + {13, 50}, + {14, 51}, + {15, 52}, + {16, 53}, + {17, 54}, + {18, 55}, + {19, 56}, + {20, 57}, + {21, 58}, + {22, 59}, + {23, 60}, + }, + }; + + std::vector rejects = llama_grammar_reject_candidates_for_stack(grammar->rules, grammar->stacks[0], next_candidates); + + std::vector> all_rejects; + + for (std::size_t count = 0; count < grammar->stacks.size(); ++count) + { + rejects = llama_grammar_reject_candidates_for_stack(grammar->rules, grammar->stacks[count], next_candidates); + all_rejects.push_back(rejects); + } + + index = 0; + for (auto rej : all_rejects) + { + for (uint32_t i = 0; i < rej.size(); i++) + { + auto element = rej[i]; + auto expected_element = expected_reject[index][i]; + assert(element.index == expected_element.first && *element.code_points == expected_element.second); + } + index++; + } + + for (auto 
&candidate : next_candidates) + { + delete[] candidate.code_points; + candidate.code_points = nullptr; + } + delete grammar; + return 0; +} From a73ccf1aa34de49f61bfeb7f8a679c3bfdb3abe3 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 17 Aug 2023 10:47:09 +0300 Subject: [PATCH 10/71] llama : replace (permute + reshape + view_1d) with (view_3d) (#2538) ggml-ci --- llama.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/llama.cpp b/llama.cpp index 345243990..b8cc22942 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1609,11 +1609,11 @@ static struct ggml_cgraph * llama_build_graph( ggml_set_name(Q, "Q"); struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa), - n_embd_head, n_head_kv, n_past + N), - 0, 2, 1, 3); + ggml_view_3d(ctx0, kv_self.k, + n_embd_head, n_past + N, n_head_kv, + ggml_element_size(kv_self.k)*n_embd_gqa, + ggml_element_size(kv_self.k)*n_embd_head, + ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); offload_func_kq(K); ggml_set_name(K, "K"); @@ -1642,9 +1642,9 @@ static struct ggml_cgraph * llama_build_graph( struct ggml_tensor * V = ggml_view_3d(ctx0, kv_self.v, n_past + N, n_embd_head, n_head_kv, - n_ctx*ggml_element_size(kv_self.v), - n_ctx*ggml_element_size(kv_self.v)*n_embd_head, - n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il); + ggml_element_size(kv_self.v)*n_ctx, + ggml_element_size(kv_self.v)*n_ctx*n_embd_head, + ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); offload_func_v(V); ggml_set_name(V, "V"); From 5a0a2c5685544dc41304779fb3f05f2231e300bd Mon Sep 17 00:00:00 2001 From: klosax <131523366+klosax@users.noreply.github.com> Date: Thu, 17 Aug 2023 15:18:16 +0200 Subject: [PATCH 11/71] llama.cpp : print actual model size --- llama.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 38a2d5ba8..5a1501651 100644 --- 
a/llama.cpp +++ b/llama.cpp @@ -1023,6 +1023,7 @@ struct llama_model_loader { int n_kv = 0; int n_tensors = 0; int n_created = 0; + size_t n_tot_elements = 0; bool use_mmap = false; @@ -1047,6 +1048,16 @@ struct llama_model_loader { file_version = (enum llama_file_version) gguf_get_version(ctx_gguf); + for (int i = 0; i < n_tensors; i++) { + const char * name = gguf_get_tensor_name(ctx_gguf, i); + struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name); + size_t elem = 1; + for (int j = 0; j < t->n_dims; j++) { + elem *= t->ne[j]; + } + n_tot_elements += elem; + } + // print meta data // TODO: make optional { @@ -1413,7 +1424,8 @@ static void llama_model_load_internal( LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); - LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type)); + LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml->n_tot_elements*1e-9); + } if (vocab_only) { From d6fd53afd64417203d77e1530f2f7bf182ffa96e Mon Sep 17 00:00:00 2001 From: klosax <131523366+klosax@users.noreply.github.com> Date: Thu, 17 Aug 2023 15:24:35 +0200 Subject: [PATCH 12/71] llama.cpp : use ggml_elements() --- llama.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/llama.cpp b/llama.cpp index 5a1501651..b7ca6db3c 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1051,11 +1051,7 @@ struct llama_model_loader { for (int i = 0; i < n_tensors; i++) { const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name); - size_t elem = 1; - for (int j = 0; j < t->n_dims; j++) { - elem *= t->ne[j]; - } - n_tot_elements += elem; + n_tot_elements += ggml_nelements(t); } // print meta data From 8dae7ce68437faf1fa96ec0e7687b8700956ef20 Mon Sep 17 00:00:00 2001 From: Kerfuffle 
<44031344+KerfuffleV2@users.noreply.github.com> Date: Thu, 17 Aug 2023 07:29:44 -0600 Subject: [PATCH 13/71] Add --cfg-negative-prompt-file option for examples (#2591) Add --cfg-negative-prompt-file option for examples --- examples/common.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/examples/common.cpp b/examples/common.cpp index 9f8aab9a2..bd39d9220 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -274,6 +274,21 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } params.cfg_negative_prompt = argv[i]; + } else if (arg == "--cfg-negative-prompt-file") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::ifstream file(argv[i]); + if (!file) { + fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); + invalid_param = true; + break; + } + std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.cfg_negative_prompt)); + if (params.cfg_negative_prompt.back() == '\n') { + params.cfg_negative_prompt.pop_back(); + } } else if (arg == "--cfg-scale") { if (++i >= argc) { invalid_param = true; @@ -567,8 +582,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stdout, " or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n"); fprintf(stdout, " --grammar GRAMMAR BNF-like grammar to constrain generations (see samples in grammars/ dir)\n"); fprintf(stdout, " --grammar-file FNAME file to read grammar from\n"); - fprintf(stdout, " --cfg-negative-prompt PROMPT \n"); + fprintf(stdout, " --cfg-negative-prompt PROMPT\n"); fprintf(stdout, " negative prompt to use for guidance. (default: empty)\n"); + fprintf(stdout, " --cfg-negative-prompt-file FNAME\n"); + fprintf(stdout, " negative prompt file to use for guidance. 
(default: empty)\n"); fprintf(stdout, " --cfg-scale N strength of guidance (default: %f, 1.0 = disable)\n", params.cfg_scale); fprintf(stdout, " --rope-scale N RoPE context linear scaling factor, inverse of --rope-freq-scale (default: %g)\n", 1.0f/params.rope_freq_scale); fprintf(stdout, " --rope-freq-base N RoPE base frequency, used by NTK-aware scaling (default: %.1f)\n", params.rope_freq_base); From e0429d38e416c10a70a941faa423b1f2e80a04c7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 17 Aug 2023 17:19:52 +0300 Subject: [PATCH 14/71] convert-new.py : output gguf (#2635) * convert-new.py : output gguf (WIP) * convert-new.py : add gguf key-value pairs * llama : add hparams.ctx_train + no longer print ftype * convert-new.py : minor fixes * convert-new.py : vocab-only option should work now * llama : fix tokenizer to use llama_char_to_byte * tests : add new ggml-vocab-llama.gguf * convert-new.py : tensor name mapping * convert-new.py : add map for skipping tensor serialization * convert-new.py : convert script now works * gguf.py : pick some of the refactoring from #2644 * convert-new.py : minor fixes --- convert-llama-7b-pth-to-gguf.py | 2 +- convert-new.py | 443 +++++++++++++++++--------------- gguf.py | 301 ++++++++++++++++------ llama.cpp | 90 ++++--- models/ggml-vocab-aquila.bin | Bin 1963875 -> 0 bytes models/ggml-vocab-llama.bin | Bin 466955 -> 0 bytes models/ggml-vocab-llama.gguf | Bin 0 -> 467382 bytes tests/CMakeLists.txt | 6 +- tests/test-tokenizer-0.cpp | 11 +- 9 files changed, 526 insertions(+), 327 deletions(-) delete mode 100644 models/ggml-vocab-aquila.bin delete mode 100644 models/ggml-vocab-llama.bin create mode 100644 models/ggml-vocab-llama.gguf diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py index c4e425ee3..9afea8a7e 100644 --- a/convert-llama-7b-pth-to-gguf.py +++ b/convert-llama-7b-pth-to-gguf.py @@ -298,7 +298,7 @@ for part_name in part_names: print( name + ", shape " + str(len(data.shape)) + ", " + 
str(old_dtype) + " --> " + str(data.dtype)) - gguf_writer.write_tensor_to_file(data) + gguf_writer.write_tensor_data(data) gguf_writer.close() diff --git a/convert-new.py b/convert-new.py index 2c02ee73c..b243356f0 100755 --- a/convert-new.py +++ b/convert-new.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +import gguf import argparse import concurrent.futures import copy @@ -33,6 +34,13 @@ if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' +ARCH=gguf.MODEL_ARCH.LLAMA +NAMES=gguf.MODEL_TENSOR_NAMES[ARCH] + +# +# data types +# + @dataclass(frozen=True) class UnquantizedDataType: name: str @@ -44,14 +52,6 @@ DT_BF16 = UnquantizedDataType('BF16') DataType = Union[UnquantizedDataType] -DATA_TYPE_TO_FTYPE: Dict[DataType, int] = { - DT_F32: 0, - DT_F16: 1, -} - -FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \ - {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()} - DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = { DT_BF16: np.dtype(np.uint16), DT_F16: np.dtype(np.float16), @@ -62,6 +62,13 @@ DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = { NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \ {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()} +SAFETENSORS_DATA_TYPES: Dict[str, DataType] = { + 'BF16': DT_BF16, + 'F16': DT_F16, + 'F32': DT_F32, + 'I32': DT_I32, +} + class GGMLFileType(enum.Enum): AllF32 = 0 MostlyF16 = 1 # except 1d tensors @@ -77,48 +84,31 @@ class GGMLFileType(enum.Enum): else: raise ValueError(self) -# TODO: this is LLaMA specific -def make_tensors_list() -> List[str]: - ret = [ - 'tok_embeddings.weight', - 'norm.weight', - 'output.weight', - ] - for i in range(80): # maximum number of layer - ret += [ - f'layers.{i}.attention.wq.weight', - f'layers.{i}.attention.wk.weight', - f'layers.{i}.attention.wv.weight', - f'layers.{i}.attention.wo.weight', - f'layers.{i}.attention_norm.weight', - f'layers.{i}.feed_forward.w1.weight', - 
f'layers.{i}.feed_forward.w2.weight', - f'layers.{i}.feed_forward.w3.weight', - f'layers.{i}.ffn_norm.weight', - ] - return ret - -# TODO: this should be generalized for non-LLaMA models -TENSORS_LIST = make_tensors_list() -TENSORS_SET = set(TENSORS_LIST) - -def find_n_mult(n_ff: int, n_embd: int) -> int: - # hardcoded magic range - for n_mult in range(8192, 1, -1): - calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult - if calc_ff == n_ff: - return n_mult - raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).") +# +# hparams loading +# @dataclass class Params: - n_vocab: int - n_embd: int - n_mult: int - n_head: int - n_layer: int - n_kv_head: Optional[int] # This parameter is only used for Llama 2 + n_vocab: int + n_embd: int + n_mult: int + n_layer: int + n_ctx: int + n_ff: int + n_head: int + n_head_kv: int + f_norm_eps: float + + @staticmethod + def find_n_mult(n_ff: int, n_embd: int) -> int: + # hardcoded magic range + for n_mult in range(8192, 1, -1): + calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult + if calc_ff == n_ff: + return n_mult + raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).") @staticmethod def guessed(model: 'LazyModel') -> 'Params': @@ -137,37 +127,57 @@ class Params: raise Exception("failed to guess 'n_layer'. 
This model is unknown or unsupported.\n" "Suggestion: provide 'config.json' of the model in the same directory containing model files.") - n_head=n_embd // 128 # guessed + n_head = n_embd // 128 # guessed + n_mult = 256 # guessed + + # TODO: verify this + n_ff = int(2 * (4 * n_embd) / 3) + n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult) return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = 256, - n_head = n_head, - n_layer = n_layer, - n_kv_head = None, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = -1, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head, + f_norm_eps = 1e-5, ) @staticmethod def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params': config = json.load(open(config_path)) - n_vocab = config["vocab_size"]; - n_embd = config["hidden_size"]; - n_head = config["num_attention_heads"]; - n_layer = config["num_hidden_layers"]; - n_ff = config["intermediate_size"]; - n_kv_head = config.get("num_key_value_heads") + n_vocab = config["vocab_size"]; + n_embd = config["hidden_size"]; + n_layer = config["num_hidden_layers"]; + n_ff = config["intermediate_size"]; + n_head = config["num_attention_heads"]; + n_head_kv = config["num_key_value_heads"]; + f_norm_eps = config["rms_norm_eps"]; - n_mult = find_n_mult(n_ff, n_embd); + n_mult = Params.find_n_mult(n_ff, n_embd); + + if "max_sequence_length" in config: + n_ctx = config["max_sequence_length"] + elif "max_position_embeddings" in config: + n_ctx = config["max_position_embeddings"] + else: + raise Exception("failed to guess 'n_ctx'. 
This model is unknown or unsupported.\n" + "Suggestion: provide 'config.json' of the model in the same directory containing model files.") return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = n_mult, - n_head = n_head, - n_layer = n_layer, - n_kv_head = n_kv_head, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = n_ctx, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head_kv, + f_norm_eps = f_norm_eps, ) # LLaMA v2 70B params.json @@ -176,22 +186,32 @@ class Params: def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params': config = json.load(open(config_path)) - n_vocab = config["vocab_size"]; - n_embd = config["dim"]; - n_head = config["n_heads"]; - n_layer = config["n_layers"]; - n_mult = config["multiple_of"]; + n_vocab = config["vocab_size"]; + n_embd = config["dim"]; + n_layer = config["n_layers"]; + n_mult = config["multiple_of"]; + n_ctx = 2048 if config["norm_eps"] == 1e-06 else 4096 # hack to determine LLaMA v1 vs v2 + n_ff = -1; + n_head = config["n_heads"]; + n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head; + f_norm_eps = config["norm_eps"]; if n_vocab == -1: n_vocab = model["tok_embeddings.weight"].shape[0] + if n_ff == -1: + n_ff = model["layers.0.feed_forward.w1.weight"].shape[0] + return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = n_mult, - n_head = n_head, - n_layer = n_layer, - n_kv_head = None, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = n_ctx, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head_kv, + f_norm_eps = f_norm_eps, ) @staticmethod @@ -206,10 +226,13 @@ class Params: else: params = Params.guessed(model_plus.model) - print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}') return params +# +# vocab +# + class BpeVocab: def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> 
None: self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read()) @@ -294,13 +317,17 @@ class SentencePieceVocab: def __repr__(self) -> str: return f"" - Vocab = Union[BpeVocab, SentencePieceVocab] -def permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head +# +# data loading +# TODO: reuse (probably move to gguf.py?) +# + +def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray: + if n_head_kv is not None and n_head != n_head_kv: + n_head //= n_head_kv return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) .swapaxes(1, 2) .reshape(weights.shape)) @@ -312,7 +339,7 @@ class Tensor(metaclass=ABCMeta): @abstractmethod def astype(self, data_type: DataType) -> 'Tensor': ... @abstractmethod - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'Tensor': ... + def permute(self, n_head: int, n_head_kv: int) -> 'Tensor': ... @abstractmethod def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ... 
@abstractmethod @@ -350,8 +377,8 @@ class UnquantizedTensor(Tensor): r = self.ndarray.shape[0] // 3 return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'UnquantizedTensor': - return UnquantizedTensor(permute(self.ndarray, n_head, n_kv_head)) + def permute(self, n_head: int, n_head_kv: int) -> 'UnquantizedTensor': + return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv)) def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray: @@ -374,18 +401,18 @@ GGMLCompatibleTensor = Union[UnquantizedTensor] class DeferredPermutedTensor(Tensor): - def __init__(self, base: Tensor, n_head: int, n_kv_head: Optional[int] = None) -> None: + def __init__(self, base: Tensor, n_head: int, n_head_kv: int) -> None: self.base = base self.n_head = n_head self.data_type = self.base.data_type def astype(self, data_type: DataType) -> Tensor: - return self.base.astype(data_type).permute(self.n_head, self.n_kv_head) + return self.base.astype(data_type).permute(self.n_head, self.n_head_kv) def to_ggml(self) -> GGMLCompatibleTensor: - return self.base.to_ggml().permute(self.n_head, self.n_kv_head) + return self.base.to_ggml().permute(self.n_head, self.n_head_kv) - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor: + def permute(self, n_head: int, n_head_kv: int) -> Tensor: raise Exception("shouldn't permute twice") @@ -481,10 +508,10 @@ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus: return ModelPlus(model, paths, format, vocab) -def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_kv_head: Optional[int] = None) -> LazyTensor: +def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor: def load() -> Tensor: - return lazy_tensor.load().permute(n_head, n_kv_head) - return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_kv_head}) ' + 
lazy_tensor.description) + return lazy_tensor.load().permute(n_head, n_head_kv) + return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor: def load() -> Tensor: @@ -500,34 +527,6 @@ def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor: s[0] = s[0] // 3 return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description) -def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel: - out: LazyModel = {} - out["tok_embeddings.weight"] = model["model.embed_tokens.weight"] - out["norm.weight"] = model["model.norm.weight"] - out["output.weight"] = model["lm_head.weight"] - - for i in itertools.count(): - if f"model.layers.{i}.self_attn.q_proj.weight" in model: - out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head) - out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_kv_head) - out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] - elif f"model.layers.{i}.self_attn.W_pack.weight" in model: - out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head) - out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head) - out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2) - else: - break - - out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"] - - out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"] - out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"] - 
out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"] - - out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"] - out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"] - return out - # Functionality that simulates `torch.load` but where individual tensors are # only loaded into memory on demand, not all at once. @@ -621,14 +620,6 @@ def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus: return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None) -SAFETENSORS_DATA_TYPES: Dict[str, DataType] = { - 'BF16': DT_BF16, - 'F16': DT_F16, - 'F32': DT_F32, - 'I32': DT_I32, -} - - def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus: header_size, = struct.unpack(' ModelPlus: In = TypeVar('In') Out = TypeVar('Out') - def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]: '''Parallel map, but with backpressure. 
If the caller doesn't call `next` fast enough, this will stop calling `func` at some point rather than @@ -715,88 +705,133 @@ def check_vocab_size(params: Params, vocab: Vocab) -> None: class OutputFile: def __init__(self, fname_out: Path) -> None: - self.fout = open(fname_out, "wb") + self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - def write_file_header(self, params: Params, file_type: GGMLFileType) -> None: - self.fout.write(b"ggjt"[::-1]) # magic - values = [ - 1, # file version - params.n_vocab, - params.n_embd, - params.n_mult, - params.n_head, - params.n_layer, - params.n_embd // params.n_head, # rot (obsolete) - file_type.value, - ] - self.fout.write(struct.pack("i" * len(values), *values)) + def add_meta_arch(self, params: Params) -> None: + self.gguf.add_context_length (params.n_ctx) + self.gguf.add_embedding_length (params.n_embd) + self.gguf.add_block_count (params.n_layer) + self.gguf.add_feed_forward_length (params.n_ff) + self.gguf.add_rope_dimension_count(params.n_embd // params.n_head) + self.gguf.add_head_count (params.n_head) + self.gguf.add_head_count_kv (params.n_head_kv) + self.gguf.add_layer_norm_rms_eps (params.f_norm_eps) - def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None: - sname = name.encode('utf-8') - self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type])) - self.fout.write(struct.pack("i" * len(shape), *shape[::-1])) - self.fout.write(sname) - self.fout.seek((self.fout.tell() + 31) & -32) - - def write_vocab(self, vocab: Vocab) -> None: + def add_meta_vocab(self, vocab: Vocab) -> None: + tokens = [] + scores = [] for text, score in vocab.all_tokens(): - self.fout.write(struct.pack("i", len(text))) - self.fout.write(text) - self.fout.write(struct.pack("f", score)) + tokens.append(text) + scores.append(score) + + self.gguf.add_tokenizer_model("llama") + self.gguf.add_token_list(tokens) + self.gguf.add_token_scores(scores) + 
#self.gguf.add_token_types(toktypes) # TODO: add this + + # TODO: added / special tokens + + def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: + n_elements = 1 + for dim in tensor.shape: + n_elements *= dim + data_type = DATA_TYPE_TO_NUMPY[tensor.data_type] + data_nbytes = n_elements * data_type.itemsize + self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes) + + def write_meta(self) -> None: + self.gguf.write_header_to_file() + self.gguf.write_kv_data_to_file() + + def write_tensor_info(self) -> None: + self.gguf.write_ti_data_to_file() + + def close(self) -> None: + self.gguf.close() @staticmethod - def write_vocab_only(fname_out: Path, vocab: Vocab) -> None: - of = OutputFile(fname_out) - params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0) - of = OutputFile(fname_out) - of.write_file_header(params, file_type=GGMLFileType.AllF32) - of.write_vocab(vocab) - of.fout.close() - - @staticmethod - def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None: + def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None: check_vocab_size(params, vocab) + of = OutputFile(fname_out) - of.write_file_header(params, file_type) - print("Writing vocab...") - of.write_vocab(vocab) + + # meta data + of.add_meta_arch(params) + of.add_meta_vocab(vocab) + of.write_meta() + + of.close() + + @staticmethod + def write_all(fname_out: Path, params: Params, model: LazyModel, vocab: Vocab) -> None: + check_vocab_size(params, vocab) + + of = OutputFile(fname_out) + + # meta data + of.add_meta_arch(params) + of.add_meta_vocab(vocab) + + # tensor info + for name, lazy_tensor in model.items(): + of.add_tensor_info(name, lazy_tensor) + + of.write_meta() + of.write_tensor_info() def do_item(item: Tuple[str, LazyTensor]) -> NDArray: name, lazy_tensor = item return lazy_tensor.load().to_ggml().ndarray + # tensor data ndarrays = bounded_parallel_map(do_item, 
model.items(), concurrency=8) for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) padi = len(str(len(model))) print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}") - of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type) - ndarray.tofile(of.fout) - of.fout.close() + of.gguf.write_tensor_data(ndarray) + of.close() def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType: - wq_type = model["layers.0.attention.wq.weight"].data_type - if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)): + wq_type = model[NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0)+".weight"].data_type + + if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32): return GGMLFileType.AllF32 - if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): + if output_type_str == "f16" or (output_type_str is None and wq_type in (DT_F16, DT_BF16)): return GGMLFileType.MostlyF16 + name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()} + raise Exception(f"Unexpected combination of types: {name_to_type}") - -def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel: - if "lm_head.weight" in model: - model = convert_transformers_to_orig(model, params) - model = filter_and_sort_tensors(model) - - return model - - def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) for (name, tensor) in model.items()} +def convert_model_names(model: LazyModel, params: Params) -> LazyModel: + tmap = gguf.get_tensor_name_map(ARCH, params.n_layer) + + out: LazyModel = {} + for name, lazy_tensor in model.items(): + name_new = name + + if name in tmap: + name_new = tmap[name] + elif name.endswith(".weight") and 
name[:-7] in tmap: + name_new = tmap[name[:-7]] + ".weight" + elif name.endswith(".bias") and name[:-5] in tmap: + name_new = tmap[name[:-5]] + ".bias" + else: + raise Exception(f"Unexpected tensor name: {name}") + + if gguf.should_skip_tensor(ARCH, params.n_layer, name_new): + print(f"skipping tensor {name_new}") + else: + print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type} | {lazy_tensor.shape}") + out[name_new] = lazy_tensor + + return out def nth_multifile_path(path: Path, n: int) -> Optional[Path]: '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return @@ -847,11 +882,6 @@ def load_some_model(path: Path) -> ModelPlus: # Try the PyTorch patterns too, with lower priority globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"] files = [file for glob in globs for file in path.glob(glob)] - if not files: - # Try GGML too, but with lower priority, since if both a non-GGML - # model and a GGML model exist in the same directory, we assume the - # latter was converted from the former. - files = list(path.glob("ggml-model*.bin*")) if not files: raise Exception(f"Can't find model in directory {path}") if len(files) > 1: @@ -868,12 +898,7 @@ def load_some_model(path: Path) -> ModelPlus: return model_plus -def filter_and_sort_tensors(model: LazyModel) -> LazyModel: - return {name: model[name] for name in TENSORS_LIST if name in model} - - def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]: - print(f"vocabtype: {vocabtype}") # Be extra-friendly and accept either a file or a directory. Also, if it's # a directory, it might be the model directory, and tokenizer.model might # be in the parent of that. 
@@ -892,8 +917,10 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, Sentence raise FileNotFoundError( f"Could not find tokenizer.model in {path} or its parent; " "if it's in another directory, pass the directory as --vocab-dir") + + print(f"Loading vocab file '{path}', type '{vocabtype}'") + added_tokens_path = path.parent / "added_tokens.json" - print(f"Loading vocab file {path}") if vocabtype == "bpe": return BpeVocab(path, added_tokens_path if added_tokens_path.exists() else None) elif vocabtype == "spm": @@ -933,38 +960,52 @@ def main(args_in: Optional[List[str]] = None) -> None: parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") - parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)") + parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm") + parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") args = parser.parse_args(args_in) - vocab: Vocab if args.dump_single: model_plus = lazy_load_file(args.model) do_dump_model(model_plus) - elif args.vocab_only: + + model_plus = load_some_model(args.model) + + params = Params.load(model_plus) + if params.n_ctx == -1: + if args.ctx is None: + raise Exception("The model doesn't have a context size, and you didn't specify one with --ctx\n" + "Please specify one with --ctx:\n" + " - LLaMA v1: --ctx 2048\n" + " - LLaMA v2: --ctx 4096\n") + params.n_ctx = args.ctx + + print(f"params = {params}") + + vocab: Vocab + if args.vocab_only: vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) assert args.outfile, "need --outfile if using --vocab-only" outfile = args.outfile - 
OutputFile.write_vocab_only(outfile, vocab) + OutputFile.write_vocab_only(outfile, params, vocab) print(f"Wrote {outfile}") else: - model_plus = load_some_model(args.model) if args.dump: do_dump_model(model_plus) return + if model_plus.vocab is not None and args.vocab_dir is None: vocab = model_plus.vocab else: vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent vocab = load_vocab(vocab_dir, args.vocabtype) - params = Params.load(model_plus) model = model_plus.model - model = do_necessary_conversions(model, params) + model = convert_model_names(model, params) output_type = pick_output_type(model, args.outtype) model = convert_to_output_type(model, output_type) outfile = args.outfile or default_outfile(model_plus.paths, output_type) - OutputFile.write_all(outfile, params, output_type, model, vocab) + OutputFile.write_all(outfile, params, model, vocab) print(f"Wrote {outfile}") diff --git a/gguf.py b/gguf.py index e7f6f0ac8..a4dd10872 100644 --- a/gguf.py +++ b/gguf.py @@ -8,7 +8,7 @@ import sys import struct import numpy as np -from enum import IntEnum +from enum import IntEnum, auto from typing import Any, IO, List # @@ -33,24 +33,24 @@ KEY_GENERAL_SOURCE_URL = "general.source.url" KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository" # LLM -KEY_LLM_CONTEXT_LENGTH = "{llm}.context_length" -KEY_LLM_EMBEDDING_LENGTH = "{llm}.embedding_length" -KEY_LLM_BLOCK_COUNT = "{llm}.block_count" -KEY_LLM_FEED_FORWARD_LENGTH = "{llm}.feed_forward_length" -KEY_LLM_USE_PARALLEL_RESIDUAL = "{llm}.use_parallel_residual" -KEY_LLM_TENSOR_DATA_LAYOUT = "{llm}.tensor_data_layout" +KEY_LLM_CONTEXT_LENGTH = "{arch}.context_length" +KEY_LLM_EMBEDDING_LENGTH = "{arch}.embedding_length" +KEY_LLM_BLOCK_COUNT = "{arch}.block_count" +KEY_LLM_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length" +KEY_LLM_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual" +KEY_LLM_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout" # attention -KEY_ATTENTION_HEAD_COUNT = 
"{llm}.attention.head_count" -KEY_ATTENTION_HEAD_COUNT_KV = "{llm}.attention.head_count_kv" -KEY_ATTENTION_MAX_ALIBI_BIAS = "{llm}.attention.max_alibi_bias" -KEY_ATTENTION_CLAMP_KQV = "{llm}.attention.clamp_kqv" -KEY_ATTENTION_LAYERNORM_EPS = "{llm}.attention.layer_norm_epsilon" -KEY_ATTENTION_LAYERNORM_RMS_EPS = "{llm}.attention.layer_norm_rms_epsilon" +KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count" +KEY_ATTENTION_HEAD_COUNT_KV = "{arch}.attention.head_count_kv" +KEY_ATTENTION_MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" +KEY_ATTENTION_CLAMP_KQV = "{arch}.attention.clamp_kqv" +KEY_ATTENTION_LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" +KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" # RoPE -KEY_ROPE_DIMENSION_COUNT = "{llm}.rope.dimension_count" -KEY_ROPE_SCALE = "{llm}.rope.scale" +KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count" +KEY_ROPE_SCALE = "{arch}.rope.scale" # tokenization KEY_TOKENIZER_MODEL = "tokenizer.ggml.model" @@ -70,34 +70,137 @@ KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world" # recommended mapping of model tensor names for storage in gguf # -def get_tensor_name_map(n_blocks : int): +class MODEL_ARCH(IntEnum): + LLAMA = auto() + FALCON = auto() + GPT2 = auto() + GPTJ = auto() + GPTNEOX = auto() + MPT = auto() + +class MODEL_TENSOR(IntEnum): + TOKEN_EMBD = auto() + POS_EMBD = auto() + OUTPUT = auto() + OUTPUT_NORM = auto() + ROPE_FREQS = auto() + ATTN_Q = auto() + ATTN_K = auto() + ATTN_V = auto() + ATTN_QKV = auto() + ATTN_OUT = auto() + ATTN_NORM = auto() + ATTN_NORM_2 = auto() + ATTN_ROT_EMBD = auto() + FFN_GATE = auto() + FFN_DOWN = auto() + FFN_UP = auto() + FFN_NORM = auto() + +MODEL_ARCH_NAMES = { + MODEL_ARCH.LLAMA : "llama", + MODEL_ARCH.FALCON : "falcon", + MODEL_ARCH.GPT2 : "gpt2", + MODEL_ARCH.GPTJ : "gptj", + MODEL_ARCH.GPTNEOX : "gptneox", + MODEL_ARCH.MPT : "mpt", + } + +MODEL_TENSOR_NAMES = { + MODEL_ARCH.LLAMA : { + MODEL_TENSOR.TOKEN_EMBD : "token_embd", + 
MODEL_TENSOR.OUTPUT_NORM : "output_norm", + MODEL_TENSOR.OUTPUT : "output", + MODEL_TENSOR.ROPE_FREQS : "rope_freqs", + MODEL_TENSOR.ATTN_NORM : "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_Q : "blk.{bid}.attn_q", + MODEL_TENSOR.ATTN_K : "blk.{bid}.attn_k", + MODEL_TENSOR.ATTN_V : "blk.{bid}.attn_v", + MODEL_TENSOR.ATTN_OUT : "blk.{bid}.attn_output", + MODEL_TENSOR.ATTN_ROT_EMBD : "blk.{bid}.attn_rot_embd", + MODEL_TENSOR.FFN_NORM : "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_GATE : "blk.{bid}.ffn_gate", + MODEL_TENSOR.FFN_DOWN : "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP : "blk.{bid}.ffn_up", + }, + MODEL_ARCH.FALCON : { + MODEL_TENSOR.TOKEN_EMBD : "token_embd", + MODEL_TENSOR.OUTPUT_NORM : "output_norm", + MODEL_TENSOR.OUTPUT : "output", + MODEL_TENSOR.ATTN_NORM : "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_NORM_2 : "blk.{bid}.attn_norm_2", + MODEL_TENSOR.ATTN_QKV : "blk.{bid}.attn_qkv", + MODEL_TENSOR.ATTN_OUT : "blk.{bid}.attn_output", + MODEL_TENSOR.FFN_DOWN : "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP : "blk.{bid}.ffn_up", + }, + MODEL_ARCH.GPT2 : { + # TODO + }, + # TODO + } + +# tensors that will not be serialized +MODEL_TENSOR_SKIP = { + MODEL_ARCH.LLAMA : [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + } + +def should_skip_tensor(arch : MODEL_ARCH, n_blocks : int, name : str) -> bool: + for skip in MODEL_TENSOR_SKIP.get(arch, []): + for i in range(n_blocks): + if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i): + return True + + return False + +def get_tensor_name_map(arch : MODEL_ARCH, n_blocks : int) -> dict: tensor_map = {} + # Token embeddings - mapped_to = "token_embd" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.TOKEN_EMBD, None) + tensor_map["gpt_neox.embed_in"] = mapped_to # gptneox tensor_map["transformer.wte"] = mapped_to # gpt2 mpt tensor_map["transformer.word_embeddings"] = mapped_to # falcon tensor_map["model.embed_tokens"] = mapped_to # llama-hf tensor_map["tok_embeddings"] = mapped_to # llama-pth + # Position 
embeddings - mapped_to = "pos_embd" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.POS_EMBD, None) + tensor_map["transformer.wpe"] = mapped_to # gpt2 + + # Output + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT, None) + + tensor_map["embed_out"] = mapped_to # gptneox + tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf + tensor_map["output"] = mapped_to # llama-pth + # Output norm - mapped_to = "output_norm" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT_NORM, None) + tensor_map["gpt_neox.final_layer_norm"] = mapped_to # gptneox tensor_map["transformer.ln_f"] = mapped_to # gpt2 falcon tensor_map["transformer.norm_f"] = mapped_to # mpt tensor_map["model.norm"] = mapped_to # llama-hf tensor_map["norm"] = mapped_to # llama-pth - # Output - mapped_to = "output" - tensor_map["embed_out"] = mapped_to # gptneox - tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf - tensor_map["output"] = mapped_to # llama-pth - # Attention and fee-forward layer blocks + + # Rope frequencies + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ROPE_FREQS, None) + + tensor_map["rope.freqs"] = mapped_to # llama-pth + + # Attention and feed-forward blocks for i in range(0,n_blocks): # Attention norm - mapped_to = "blk."+str(i)+".attn_norm" + # TODO: is there are simpler way to write these 2 lines in Python? 
+ mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM, None) + mapped_to = mapped_to.format(bid=i) if mapped_to else None + tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt @@ -105,56 +208,93 @@ def get_tensor_name_map(n_blocks : int): tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth + # Attention norm 2 - mapped_to = "blk."+str(i)+".attn_norm_2" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM_2, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b + # Attention query-key-value - mapped_to = "blk."+str(i)+".attn_qkv" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_QKV, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon + # Attention query - mapped_to = "blk."+str(i)+".attn_q" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_Q, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth + # Attention key - mapped_to = "blk."+str(i)+".attn_k" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_K, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else 
None + tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth + # Attention value - mapped_to = "blk."+str(i)+".attn_v" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_V, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth + # Attention output - mapped_to = "blk."+str(i)+".attn_output" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_OUT, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth + + # Rotary embeddings + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_ROT_EMBD, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".self_attn.rotary_emb.inv_freq"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.inner_attention.rope.freqs"] = mapped_to # llama-pth + # Feed-forward norm - mapped_to = "blk."+str(i)+".ffn_norm" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_NORM, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt 
tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth + # Feed-forward up - mapped_to = "blk."+str(i)+".ffn_up" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth + # Feed-forward gate - mapped_to = "blk."+str(i)+".ffn_gate" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_GATE, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth + # Feed-forward down - mapped_to = "blk."+str(i)+".ffn_down" + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_DOWN, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2 tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt @@ -203,14 +343,16 @@ class GGUFValueType(IntEnum): class GGUFWriter: - def __init__(self, fout: IO): - self.fout = fout + def __init__(self, path: str, arch: str): + self.fout = open(path, "wb") + self.arch = arch self.offset_tensor = 0 self.data_alignment = GGUF_DEFAULT_ALIGNMENT self.kv_data = b"" self.kv_data_count = 0 self.ti_data = b"" self.ti_data_count = 0 + self.add_architecture() def 
write_header_to_file(self): self.fout.write(struct.pack(" "GGUFWriter": - f = open(path, "wb") - return cls(f) - def add_key(self, key: str): self.add_val(key, GGUFValueType.STRING, add_vtype=False) @@ -269,7 +406,8 @@ class GGUFWriter: self.add_val(val, GGUFValueType.BOOL) def add_string(self, key: str, val: str): - if len(val) == 0: return + if len(val) == 0: + return self.add_key(key) self.add_val(val, GGUFValueType.STRING) @@ -323,6 +461,8 @@ class GGUFWriter: return ((x + n - 1) // n) * n def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int): + assert tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" + encoded_name = name.encode("utf8") self.ti_data += struct.pack(" & VRAM_REQ_SCRATCH_PER_CONTEXT() // default hparams (LLaMA 7B) struct llama_hparams { - uint32_t n_vocab = 32000; - uint32_t n_ctx = 512; - uint32_t n_embd = 4096; - uint32_t n_head = 32; - uint32_t n_head_kv = 32; - uint32_t n_layer = 32; - uint32_t n_rot = 64; - uint32_t n_ff = 11008; + uint32_t n_vocab = 32000; + uint32_t n_ctx_train = 2048; // the context size used during training + uint32_t n_ctx = 512; // the context size used during inference + uint32_t n_embd = 4096; + uint32_t n_head = 32; + uint32_t n_head_kv = 32; + uint32_t n_layer = 32; + uint32_t n_rot = 64; + uint32_t n_ff = 11008; float f_norm_rms_eps = 1e-5; float rope_freq_base = 10000.0f; float rope_freq_scale = 1.0f; - enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16; - bool operator!=(const llama_hparams & other) const { return static_cast(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT } @@ -1023,7 +1022,8 @@ struct llama_model_loader { int n_kv = 0; int n_tensors = 0; int n_created = 0; - size_t n_tot_elements = 0; + + int64_t n_elements = 0; bool use_mmap = false; @@ -1051,9 +1051,9 @@ struct llama_model_loader { for (int i = 0; i < n_tensors; i++) { const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * 
t = ggml_get_tensor(ctx_meta, name); - n_tot_elements += ggml_nelements(t); + n_elements += ggml_nelements(t); } - + // print meta data // TODO: make optional { @@ -1123,6 +1123,10 @@ struct llama_model_loader { struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector & ne, ggml_backend backend) { struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); + if (cur == NULL) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + { bool is_ok = true; for (size_t i = 0; i < ne.size(); ++i) { @@ -1332,7 +1336,7 @@ static void llama_model_load_internal( } GGUF_GET(hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, "tokenizer.ggml.tokens"); - GGUF_GET(hparams.n_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length"); + GGUF_GET(hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length"); GGUF_GET(hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.embedding_length"); GGUF_GET(hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.feed_forward_length"); GGUF_GET(hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.attention.head_count"); @@ -1406,22 +1410,24 @@ static void llama_model_load_internal( } { - LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->file_version)); - LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); - LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); - LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); - LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head); - LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); - LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); - LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim - LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); - LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_rms_eps); - LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); - LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); - LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); - LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); - LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml->n_tot_elements*1e-9); + LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->file_version)); + LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); + LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train); + LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); + LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); + LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head); + LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); + LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); + LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim + LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); + LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_rms_eps); + LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); + LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); + LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); + LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type)); + LLAMA_LOG_INFO("%s: model size = %.2fB\n", __func__, ml->n_elements*1e-9); + // TODO: print number of tensors for each quantization } if (vocab_only) { @@ -2310,6 +2316,18 @@ static uint8_t llama_byte_to_char(const llama_vocab & vocab, uint8_t byte) { return false; } +static uint8_t llama_char_to_byte(const llama_vocab & vocab, uint8_t ch) { + if (llama_vocab_type(vocab) == "spm") { + return ch + 3; + } + + if (llama_vocab_type(vocab) == "bpe") { + return ch - 32; + } + + return false; +} + static std::string llama_escape_whitespace(const std::string& text) { std::string result; bool escaping = false; @@ -2446,7 +2464,7 @@ private: if (p == rev_merge.end()) { // output any symbols that did not form tokens as bytes. 
for (int j = 0; j < (int)symbol.n; ++j) { - llama_vocab::id token_id = llama_byte_to_char(vocab_, symbol.text[j]); + llama_vocab::id token_id = llama_char_to_byte(vocab_, symbol.text[j]); output.push_back(token_id); } return; @@ -3373,7 +3391,6 @@ static void llama_convert_tensor_internal(struct ggml_tensor * tensor, std::vect static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { ggml_type quantized_type; llama_ftype ftype = params->ftype; - int nthread = params->nthread; switch (params->ftype) { case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; @@ -3399,6 +3416,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s default: throw std::runtime_error(format("invalid output file type %d\n", ftype)); } + int nthread = params->nthread; + if (nthread <= 0) { nthread = std::thread::hardware_concurrency(); } @@ -3669,6 +3688,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } +// TODO: after the GGUF PR, this likely won't work and needs to be updated int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); @@ -4876,8 +4896,8 @@ int llama_token_to_str_with_model(const struct llama_model * model, llama_token return 0; } -int llama_token_to_str(const struct llama_context * ctx, llama_token token, char * str, int length) { - return llama_token_to_str_with_model(&ctx->model, token, str, length); +int llama_token_to_str(const struct llama_context * ctx, llama_token token, char * buf, int length) { + return llama_token_to_str_with_model(&ctx->model, token, buf, length); } std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) { @@ -4894,13 +4914,13 @@ std::string llama_token_to_str(const 
struct llama_context * ctx, llama_token tok return std::string(result.data(), result.size()); } -int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * str, int length) { +int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * buf, int length) { if (0 <= token && token < llama_n_vocab_from_model(&ctx->model)) { std::string result = ctx->model.vocab.id_to_token[token].tok; if (length < (int) result.length()) { return -result.length(); } - memcpy(str, result.c_str(), result.length()); + memcpy(buf, result.c_str(), result.length()); return result.length(); } return 0; diff --git a/models/ggml-vocab-aquila.bin b/models/ggml-vocab-aquila.bin deleted file mode 100644 index e06b39b5a31c1ced47e4891fc892bc956e97bcb5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1963875 zcmc${d7Pa^mH2%G1X0m;$NkD4WVwW}gFNgY+ZDnh>%HmjBn_SPolZhPWaa_fN1b*V zmvMVY(n}yDHwh#_2zl(>cA3$cc3)?_qmDW{YLDBD>%8By)DwJv^T+S~>kS{0Q%_Z$ zI(6#QsZ*y;)m?eZk|j?Ok*T-n-~MM0uYks>t%UM*5L;3yCpaI{Fn zfMZ0O1{^EWHlR*h1{^2yHUs91j2Upe$cGGA02v0HKwk|wQKVzQNg|IJa55AcaEi!h z3^-L})_~JQzHY$j5O2U4B0n+!Zp;~QrpSLAkkO@S?blf%`xtPx$kPlsN936XoJ*Gt zI8WsH2AnT)paB<%yv%?L;f4Vhi5z0UYekMQ;9`-Z47dbF8E~n{0s}4+IoW{A88rhM z@X&zQiJWV|6(ScJaHYs41}ucV23#ew(15E&t~KBqksA!S7S0>+dXWJGt`k{m!1W@- z2He1)7;vM=N&{{Zx!r)9Mc!yYlkqa3C324ei$vaJK)=Xa3>aW24Hy)8hXIR4-f6%R zk#`%gluRR18x=hfC0k@fC0-y{?35qA|EwiMC9WJw2=)1{z7Dp0iz=8 z3|JvDVZchn#(-5K_Zx7V$OZ#ei)=LDb|lJxJ4Ch^@CK2G47gKdn*nb`*bKN!%sO@}s%eA}TdBLCvhR%U@i4~cx&p@&7j@6a}pA2_s~ zN#f8WB0qNMQIY>}=rNI>Ikba0N^y|8n)@^tr`&{Ph+S!6GV-XgNMLvLk*I`lS?r#kd@k$oL{hsb^o{WbH~p?8Yx z@6fwMp6<}QMV{f%-!Q!$8WTCdq4$V9%c1v*Jlmo7G5Z~QzsPeO`dg9bI`jdN=Q;F2 zl!HSb5_y3`9~OC`Lw_goB8NVL?r`X%A}@C6Viy}}`+ z?v)N9bv1{Ox>q@b)E(pyQg^UJNZlb0A$5m3gw!485K?!zLrC2b4k2~(9C}dXNQaQW zS389K9pw=6ceF#u-!Tp$f5$q6{M8*o{*H49`J3+$@^`#L$ln5okiQcgLjF#42>Cn7 zA>{95hmgNh976t1bqM)8%^~FPbc4FN$QceHf3I-}`8(4gCnDA>{9ThmgMu976ssbO`yo$RXtKwGJVF7dwRfUE&b(cd0|j-(?OVf0sLi 
z{52dx{$A%0@^^(p$lsL?A%6=ULjJCD2>H9(A>{8GhmgN(9YX$I?-25LokPgq^$sC_ zH#mg+-RKbVcauZN-^~soe@%ywzm`MD-y(;QzkY|1zX6Akzd?tPzr_wAe@h%f{+2p~ z{0%vT{N3UZ@^`C4$ltI-$lo%DkiX>)A%7zdA%AU$kiWlh2>Baz2>Dy#5c0RuA>?nB zL&)E44k3T59YX$YcL@2r!y)AF4GtlHcRGapz0sf<JUfhmgN~_dddk zQFIH*9p1fe8Y=GIy|;Z|Df!)2WcQ|N8og)tzI;}=yHWCcqEJiZUcN{zc(6fi@Yy2dcWiWPfau4?~fvjC4avcSvQSn{B3+*B>CUk=WanW@&Wriw0w!= zA4muh+YdTC+1)02z!P1D;6qV$1neJjszyaV9A6pg{KNKjWEIxa9`3G{{O=s&INX(w z1T6!SCvX`?Rw}VU@_^SMF!C`cQKjKyUSx6_{(syW8tRw)%tQ-;fgqN7;!j(^0 ziDBfaFzD!`C3mQ`Lgc=Xrq*)ExX-Jtn`W}Dad>KaljLi>4@?B5WUc3n&)h3{z!P1j z$T}|thqTT*Yitx^#&`EUNk&ypeR_=to@tcEL~3oLr%-bW|lIO)?3xV<;>Fc2|LB_H(L7ndjFoDptK%H6hF~@K=Md2Hq)rZ4XOTi$pfC~ zB3KU^u0e1GG>jb^63feS@u9M|~txa1B` zPjp$TKV{#$g=)!9dG{FCPdol8E0cfPzPIS?XQJKdb+9A?*x%BL3&2YZE7Vr*e%Wx`ByJ7+1(;}!0U7+{jLx39g=@HO6WBBo?}PwQI`4l z4C4^$SibKW*bK_=?>kqK>wojg6Ld8Hw!+pVv<_H3Od1{{Fk68DdHP_tX?fer#V?fc(dau={m5e-a4k)=xZRYWhA^P5+T- zcvRKOe*~&&tbw0ePFlTEYPI$HXWlp*{h8O8RN4Ev!QDbFwVyj)mSh+#bJ0q7rz*@y zvA$@(|1-78vQfd9$-jts)vq?dFA{>yEVlAXXCbTMFa0~CsM`8hK>#a#{;TNmK_b6S zmDj7<|FyR_rPkkXcGv!wwK}cY>0ilzW5`w_Lh@fhuByZTa>Qy5Bboo5awa4XcxoE9 z{MM))!Ql99G_9uD?;IZQqOX4EcrYteKz?sIC`u&!_nxzEddKtzJQ$urC7=fKNR3Z- zP+0Ou&l`u~Na!D<_m|@ANMt=I`JaNPZ8cQ>C)L=XGM!S>>$ES@-NzWy8kS7DdjYpt zMgm$SrF3m0fbGf3B~P&ugUcj)iq+da&ia!(+$~lQN}0avvay%fRl}nrS){P_b*k)T zm{u*5axcd;IgOC+ZC~4M$@cE zi48gm`#aJJCUUmF=cozGu%ZdzAq>LgH(_6dj zvY%~uy2aq_l07>rcPCZ)J!kg>ohzSh)mvEA(iat2XS2RN73j6+V1_>-Wx$h2fIKJY z?#If_p5t_{?~Y6U9K+fzE|jwRycTDkV`YM~r^q6O0jJB-b5pl>O6f2wIV$LRUWa7> z`N<;11X6?MdB%m|0V(4<>OuFj&yVIuC3}AA$QIRM&+k4BX;&Lc|413R6WujxhrS^7 zV7p{5Fx0vSc|pRk5u(c~wT1ncywLMh@|fK(3|XL=VVZJgJdrX zyy#O_JI@##MQUFZWT^A|Krf`qk_HYmqK4212U;0$GEoooX4MPv;#B+zG-&o>%NSh2 z%yzI_oGayvW6)aahImQPbG~FRG2|otlE1{Uu+4yHWu(UGBa6sOqv1t&NcK{zJTyEk zxqse0HN9O*|2(QYpO;yQ5n9S#=19kcYvilF}*$s8P7$*TW~)YdM9u}mq19|P%?UVBRU`O4J7dfmoVDV@b- zl$%gxQ}b@DS}s{FP1 zW>iuhDWn-Yzl_urYZ`{b;i)$dKy_B7Hmhzp++^a&Bc(jtsOc8VY37K)-Mydovm-2L zXo>2?BcjjD@F=7!?t}Q*BjYm3<|WE@sGZ~)Bco{1dEVg(L_&^?@@GhPWNPgJm6b@b 
zU^7e0Na0UmIg(dXRO-|Mld>rP1Kscx}YCsKZt zai9%(QEH-VzPO`}z|lcieYDd(2{ZGf9Tgib2r46mpuk!=#;UaFYIclQ8An9(W2}Y4 z(Lcwe%Ij5^9&5!1ndI5ADPxW9IwGZ35v%jDj%xihYQ7#EWP)aOtE1aD-3ogO!Q7y1 zixkegnR__1@LE%<<^$FVn;+MGCOR7{i;V+IkZ!oj_Pa;cOKV@Y7|9q%oT>(U$W1R|6>+%1^rBjMdkeu@*OW*~PpO7fk9+xLL9TRF0oai}|)02o-7Afs5!j|&bP@s<^#l}^I z?Ife7&FGxu7}z^89{EYu{`~rJQaU&~$og=ym%(a6)n+GKnISBtJTk_+TXir`cEYA$ zr<@X9?_VmJ!y~I$adU@ZD7x|#BjT8YI37wJG@W#+H-KH8pXw+l5$!Sx>Hdz>?SKI$Iue%$?n#y9C^gyLrE}~| zufs-BRY0Vef6R#d%;1E|%$eR>-8gQgx3=%eyoGhlq%2E#c4A26p0j(B1+>hJ@>RNq zpXCV0F=4W^yu!pxu6~KLoc}1xGBR*p(5{>ved=!1r6W?hvtz@0wxe6G^DJ^yl`#v> z3FOEjR`)r^-w}37=Xi|?<-<8nms+gn2JzEdRoEh>wjW!cNU6PsnR~7ym{fs2FJQG0 z&x;19H|cs4DHV+eq>L0hd#nRF--ua)0G%K8nPB<(MohQ3Rm$@n)%xyQMo}*C)cEv9 z)ixJ+l__0nB8OS5y{XEXlj=fer_O{6t-T?H;KGFJ5w-Oqg`ZsaK^H|HjbgJ$iYcJV zCQ@j7wV5t*63ln_+JteP^5C_h{<=5=Ohgdv*BT*h-Gw;V8Xe%kv->RD%5}pFum4JR zal*D;J$)Bj%~n4gsWKi_v*;2dV5ndBz?XOxwkjA&d8Bj&WvX9d9l!Gi&QiUUt}z=W zyENFYrxF!R$1V->)k}-ynk)4(uZizhW&E-jq{}6bjLGQ@*f-^6Uez1`rv6!tE)RpL zpOc;Ja>t`u5(Dvar(iv{O&K{;(>v8S-0*U^&{az`tOxUsk-QNtV^<`Q-R|pB=||{G z7AclnU5g@xS6X*xuX7=I1E<6lB`oV%q}0x0u&yw=y2UbPVdPBELA2MEUQ4%`XqGF3 zgE;#0$iZu_!f|ELc?(C+*5sR|kw5%&ztxE|UCe$AXT~jeDe6gVg7EO|-xmGoIIY)6B*En&^59 zC(1Q3a;w?n%C&~~xVmHxt)lR?R&PLs`dTMpg3in99XW%iswz_0{;J|5g&=iVR$iYv zft+0DwbajWk7U<*r`3xZ8JxwSnafDwM^deLy&-5VLbqqvN3%4K;wP2EDRh(7 zQumVXrEW6z^|NaE_ucsS@M4ycLTvQd=4Qj)Ez~%;Ik>NGk^E+(det!2q=Q5FTBI2r z=x%|TSu-kMgg-LDG+0I{tV~sKEw94~fXaQ#@R}3D!EW(dDIIL-im)g((a{wlQo4V^ z$cfY>lXsv415p7P zT_<^@)PIOh^HjIMjnO7%B3PwXK{n_Z)WMkHV2_4ULa0bSyV)%m_O)S-GIv47N|oh=Po&A49b zHPtl2w1|`*Bw&&cnZ&eim2AjMVJxdjF=SM5P(C1K?{#nnJt@>_x* zY!ke|ZuQQ}tyXwsm2UZNwZg}sUvKrM)TU)fBc&a~tQ<~cY*Bl6*w7A9Js&nyXYfbL zVW*lwS!Ts`i11Wpo?(gG-3o`)uCWUoJuu09+on4Y|qh7^lwsej1)GLietOGpZipo4q+>lk*VetBk>nr zaXog+qmo5RO+WU>e-XpE2=!d1f=q3yje0e9rMlXVTF<)0Rgy=Bdc@XAS*3Ppr4`)5=hGy@_ljUB5NNw(7K*-jUwr$-O@N@pw~73E4JdC1Qc zS0y5!&_P{gt+OV*L-NSsC`@%*q$WA?kD1@$rN-ePlOa+Q-Hlr34Uxxj1giH9(YY42QVz5Aeku2)*rmyx 
z(JqTHe2Wyugl;lY3dM|*_fDrtt${Zh*c!!s_r|1SvA5Wgn?87B!nqxtTt-UG5Vp;C zB`SBSJVi=XKh|u@>*}&t-sMbNucr22dM%YPyfl9qIhbYnU)o>|;?b+hJc10%U)gv3 zc-dbC16!PiX|8Wr)-LOQ$~(%B)3a1QmR*>?(QJt)l$3LTUPsx_n<#YKzp~>}AH)G>sfK>?qM@Rc@2I z%-$U1xamPirjf+^SAlY-gumKcui8WdT7+A zb*^=r6b_Bz#P`y>Cm^%;79(Xu&kNpaU`t2jt=;FUMhg>vkN%SEt= z&Q&jis~a;UatJ-5cJJ-ZI@FuIBf5h*GbOcm#AvKQo+C$9*Q3((9873TiGU{%kK!FM zxv0M*8Q4|LjTqRyDz#0QvA=f0hl#|g{dIJyyFopYDTO0sN}7=xX9K`e8Y#S<%&5X~ z;DJObdX9Qd2k($x!eJX9d1v%y;Zmu+Gx~{T%Xx03R9SVTnNrZPj@V9P&L9L9DFZVe zeO;v_Knj}gGJ-}{Em=yeXzKiS{5gf^pn$PS z-(>_0v4!a+WHa|Lk>%adpP|K4dv`R0alnS}-BzWAsnLuKw6ZS1RcbT)-y)@0L21X) zVI*&sUdmy8W2OC@U=}B}Tc!3lhP+#xAX&m6F8#C4+i?y-D53CSU>yhjG!rRZ#db*ZJ<*f~P@qjMj?=3S;ma=G6E-W-HpC|P zJ=QEn@PzaN&jj`jYteht`l3_!z1G8HkCp7b-n6b|+ok#5pn4gHab5-`QutRawMi-c z>v6|PFKSLsZ-z{HpVcHnZH3g{m$%$lm*m*u&X}zJE_=$*8^G+QdQ5*5duq528-CO4D=LfU?@9oarZ+?M0+G$v|0_ zUaqa@M?HrLkmLOGZ=(my&2g!DiVe9PzN9=t0Pd4!r0_wi*_%?#Vpi@*aRSS5l%DDq z1Zl9qMjk6Xi=BKR_&z)&HHV1=!;tzwH9BB#Mrsm;%fc5aJ(PocA8_vBPb?!tJ+^D4 z_kkGqWg1icLB}`VRdrmeQaja3{-Dvogh%K;xJSO%?~a51585!?zEX;)H-&qYb@oZA zZPH6A_KvuBKNNgKk8PLQhrE|>@4j8KfD0manhvvu;(+;(w`|1g%tB=pA4;nZgI-oB zJ1FH|q&Usc<~|(FO>d)G?ZeIz-5WaC;&i$hWz>3siIGw>b4rR2$1p4(k@CYKYa`~8 z=mj#)`Q`5t6E%#p<|&Q5)J;v~O_+DM`FDm7oy9p_sx@;r3uzH4-PPgyh!jyJ3`8%L z>`tkJ_aojKK6aE$EmGqsSzP&%Vk^%;G(Y03Q+sk#9M$C?F%$OxZG1@a5gW|KY|cL7 z!#>UqruPx!*dXo@`KW;{B9m(l-ZqFi@lhv+xE^L!Gg5?LdOmwVY8fd6ecDLxrWP z`w;6!87X?l81x(yiVR1_$Gt^We7mIfamS0X*nOP!XBVm}ixPxbqx_pGg?eW+Qwklu zS&B&M=?xRT%2O9p@8gNTESI0~HjzEtSG7;r7!Y`teIk0Orh0-`5YX`nr^@!*Oa=|$ zD?VWZGdjpr@S9u<4h!jPT zawVeSF!Pd$=H*w7qD1bqs>9rGsNI)Zco-V8`W z5z~XgGD=`V(~aI*3%X>XEb5WFGagFJ;k!N4vEYo zrN<~}lyye$=pa#GW#p)#yh(bVgJK`&-iJ474lm?f&#h|X(MD@njqE5vaAbEKLCbm` z+4K@v73xVIvZU!WJUF*-dT+fk?D#sjbW*E& zDXvzfHZt0orwG#+K@OUcr)N4i^ioOI)p02zg^y|GUMcUlc837+NRMQaZ@9V|S|q3nU6d}NU{BZrtN_?JgY=frj?BBgT6D*HeT{xa!Rl{T|v z$Oh}~4L3?{gB9r(hfB60dXBzlsoM}O-pa8`3UZ5EK)dQ;A!*0sI$l}GdUwHWQZYDbEAmj8lk}Q_Mnlqtc4);B8T8F 
zUEVfYdo5iC9Mo$V*+%QBvTb8Dv_eByH>SRBREJQN+RQ8{Q)=d3ES26yYmfs2OoUBo zOf>YRwkbHYnDwm6S;J^GH>IXDa5u6vl4!jYRZ1^sl#!amFSS#86%xH=B^_@}6%s0| z)vh8<_e2*4=7{_j}t`}NQ)Dyqr*s1%;jEe?~?K?GiU2{|(u&*x@GZdX4+ zt{1*)o4t88WblSYiiJ_P0G=WgemU3t^3B$C8;4Ed;OO9DDK{H&L!+Fg^`f>~9-Jp_ zamcS{oj^gZVUHi8MRpyIV0+&R97!_$yTf1 zEl!l$*5DL_oo)5z$MH|#Q;8I3C>Wx-m%Byn!6NF6tCHFpZ4RiGN(k5na(??z%2JQO zL($~xC40!5VP~%#ixl@xG#dCJ=gsmZE2M}V*|mBIID3(R$Y*T32;@1w>HwgSyZV{j1xSQ#mfv2^-Iim8JQTR!T;iB-{y3?xb~8$Oo! z`MAnQm3kE3Ws%a=nmtpbCUF+xMtsbOIPOF#s_$DMqKuTDwQ#QRm^C{xJiwhvZ%94q zEK)nHKmEh21|*9dWem$lD#voj0m_b;BqNI#OR>Y-JYGt#j+J%4^iq-KgIw-=-0<#R zk20-29%5|^rdn5=V#YVSl7|53LfG^=-Qp}3tjIT2M!tp$7LNy)QHwa7a)PNe}4NTf7IhrMQ`beoS8 z%`@0R%dsr=!u922@Pjxd*#b+!JhB*n@&uUsGsK`v@-J8n0nxcgRXM z9qq9-A|hZqQG}O`HC;W4z4%7VXm<(&W%DQAq`LnG|QdkKXGO52W-lj2XsATPq}^C?HiWJa@RDd(J} z(u^F`i8At2MxaitA~FaTKv|TZ3L5otRg8SELb?Hp<t8r;LL{)qOhHyNIdO zj4YOFW?BAeN317{kFr0|OH)OZm|!P%pOlf(89pw(Pg}264Rfw3pD}99OZ}O|uMM~( zvd<)ra;OPA^txWt3%3R^xf~qCPh5N^C|j)I=79t+0aidSQo0;qvVJx-u!;Rx?XxaA z9JFMgO&#FMggFWD?Z!7Xd^YV4H~(ji@ZD3mAc{byE(mxVIcvLXrT1B*hXYtFS^4`I zHS`gOXMgVvnLi6L)Bu?5?}Pj{*Xx>IoOOLAYgQh`RhbdVqYj!{o5`f8GXuaCkBI^;0$8?9iPB z94kH_jSlEG;qx(kY_>4qy=J#KiTnL73X@nVHHS5(O0s`Uyt%KtMwY z3GY7!6IPCDP+b|dbZ+AW^HjIEWtGPI{gY!<57wmA{wcRp+SCzF(Ewj;KY*(CLkTKa-> ze2S$o|3Z>7y~^VAs7R#=MP%jo(rh@?5Cy%)LV(_AT(c0mLkRCNT7c%Ly^pgVTKYa>yL6OE#_9 zmz^e@)!=JJif~j6=e)|>#4=x`iY$8A1?88WCUtRmIS%4999F(!8G1{+_LUeQT=h=V zqmq3k*t}4KESq03R$@`&ee)cajxFqYi&T#0Q0QMVcA!m}#=R)0cOTikeAQ5lSR~9> zqjN2D^<-a-&T&X{w=}ENMh(@B6b?ZSB#M;Bc_9(3Uk&E#eV<;!IWV*sCyIP6`lJss zn6h9IiErivwUv^U>ucUNN4|P;7b(utbaDP#;O`&74oHcmY?#U@r-vJaUwkdNa}Di& z-NXe!u6^CUGM-$5__|Z0r;VHlvJFKAd_A@NfV$qi^pKv$7ExMnu5%P+PQdc(K}!o; zwii|Ptcq3R8<91N6;bL9{8 z{)UmhVkPTP&tVYY+xez4gx4U2Lv;3F+YNZQdw}PR(#Y81Mv=oqgLosJ9+i96YC65WSyQIU6xsifuBu1{+syT{XPw}vp%?hwuvD{WBhSiM?huOeU%ho*eV)+P z@pWd^H#J3lYt;N#6Okb`(f&0CV|u$TOaE%5j;O!0j6x{wURCG`q*cBfebnt=S{}Y@ z{X)~}Uh%tzW#t|0E}P$V;%?_{8*8*%U=4mZNr0ZrmQ_lld3u3eIsRB+m+$$w52!Y( 
zeJ@qkNYm_lPbNm$!rZ7zbo6ldd(QrGlztI88upFnvnsa>o!Ik6R(TBd_q`F~r8x7e zecv0I(7*>KhTROVzUlTg&Jw?GO<@aQumuJUsM#vT_oMTtNcnx^$Xz!`&!G|ah5wdD zLGM%6A~lKD=UT$QdE<85X}3D#Ff67qptby zjzPC-MAk)09bo98NU3tb9hOq89uG?|aM=qDFjXJpksmvLdkZNvLG8!EomRhO4)+go zm%aI8Yga9L4e<7q_FX4T%43)Zr2MflYUmB9gdZE)nyDO?Grl)3huw_3=qFCEIvXFM4|?Ui{E4@QOQrV{m!Ks)9_3AGTpkvbryyOe zpe*v#v4Au9KdjDT!uR95Mt5S6{ztIkD(u4qufVW~Quv-ARel=sy@Jq;+E2YvuB)kY z;iraRw+4DPBS%koIFb0NQM`DFwZ5v4L#h0!_fAJ>XXvzEg>wf)$)n7Y@cVaLI8)Bn~%EiVz}bzS0K>Y^nmmt#T-}l{c~>$ZNX-*_VeiH zavng+er_z|+^hNX?t%Ynm~*$#|9ET5ktj>?o2C?UOgv_!boau3^yfyg3WLn0PTCRU zoHa&(U^W*cvU^OLsSK18ju9!1@4zPKcl6YyU|>=l)_5q zB;-GXZ^(>Zp>O`D(a}$gq?h9Ym_GGWRJC++mQ`xAwpNwY^MCn;@lYTAz|8(d>f~1D zZT5>jBR_@m){yjOlwdq{7DP&wGM=wrxSQ3~F_NMYt$qYhR(SPwi=H>BUIW}Fzl<@w z4IfJFm)<_n^*g<=6g?Y6wOMwazNyl%@DQS(9RYI+AC8oPj?0i3;=Nyn zt+aA!DA8YeYn}*O);F><*!TzaKEyh z@FFJ@u<+}|%xy?+?bn8Wl}5tBFghssnP2_huV5f3d_XUT6}^fZLnL3J2qzx{J^^d@>UzNz*02qfC!U3@cxl?75y z0E*u_**Zw&Z;d@C5}WkfBqR24!S7-ySF7Dq`<>Mo)G(N=%F$=Gnx0}FJTODF_wS;M zt5)dhTtudtQX8fGT_Tx#NJxd=@98;W4wW(9es83-ZkO8cos`n|WTm#Vg5cj_$x`Gd2yAGZF%P_A+(*VO*57qXj-%n=#5Pj_*wkU%r&4!%4d+!h4qN>;_TGXcjrn2h))a#`fXYox}@NL)~`odWL$C&4X?4^r8ChdENZRg8BT6c$*pUgZ{Q=pj1(-e+xcHNO2b*I*4W-rQ( z+%B0z_(U6liQAH_ea#9J;fN#!vAu<8*GT#V5!@#pMjXPura(d{zxRPgg29+Pk`D%ue<>zrQ8kf(Up z)fKr_`kvxl_x-oaSnVk@ez6yi!N4l1rzEi?I?bPA4R95f3DkIs^=5?o`qF%gac4!p zwEZib#2w_t2kphKv1nWEyh$l4N;=qfok*#AV(S=m*_C_kGWvTqN^swj3eES zKkA}UYftsg`y4jALF(}hgvJoP_K{Uc9z*jrJf#oaARf)924!5yk#-cMuc+ZXQ0dzs z+f27i$ksGz%W zR@;43*Eb=!efvgVHNM%*Wr7#I1V<4gSPu0lf$ZSSh-U)!joxV_bz@%>8_q(cxvw$O z?%J~YwNtTW7+HC=tGfb(df%C+xzcn8Tj5BSpw&A28ZVBxfyZAB^DvIZ*`UHaRjh(~ zDXEuwf@5Zb%vE&qZVg^ReTq=_o2h9l?|AZB`hUXGw_i*<@Tyg^iB-0?pYaJBUvK5q z_lq76aG^De;uapL?g^A*W53XF6mBMT`j#UjtPjkz9Uby#q2v2mABl|_i?a6a2Dy6V zRU};a2|D{34>SOw3>1tb*7R)E2v3dZ1kw#al+bI}Xz0{~0ll*(PxEnCAwuMz7CqM6 ztxOZWXkB|+bYWmf>Rwu3q1Lmplo}Y(C8(;aEn954Gy}VyJ$9u1G$$HIZytqp-c3gT z#-cD6M$p4WLdUZ57~E8sey1v-*VN0@yovLyBX8?%_9({98mzjxNa19J;j+K?S1q;& 
zrEhTYx^7TId!h@pN5_L*sxF^+0OQlf$rDkXn*4f^0rQzfsbdP*s8ZZ#ctK! zc#AysnQWD@s^l)+twl;*IQL7(>1e4p+QFg4gEH%7Fd*~={oejDHx><=CuuHWU@t@j z>(diW6VmtekkJ(_BHU|F_wEpJGR@Z4p02BV>DEd%)mhY>!uV`>O-BA9wmi**hp_&Y z(tf(}s>OB9JW#S*(TEFsq+=|qsyZP^|LMlfi!PInqsN2Gt@Eg-8t*aHx!5JMj`ldc ze39Dg=MY-vqOjTK2!P!xdxrJWPC5IYVf5U1#KBT~h7T$SEhv%tGpy|!kK!ri)OFr2 zL?rd)u||}|*P(V|q&Q?_ZD~7t{7i#9y&QE-wY%aNtJ-;(qxK?7=OyKn?P(>TC=nE|Ok z(>gl;B;J5F=!kjJc&4+0Eh6Zeo?^eRaWHMK%R{^aycHd|+0F3c5vC8zP1NPJP);if zFTHz;CAbqg<1E2E9Z}WgLMP_PIBvw*M0mg65=}&~IE>5OGedAWCbf%L2c*&3#MY!Q zQfi72s(FBu6{qw@Rz1F{GE>E(II{z+!vrEU4lps$rM?*jb+9&?QrRYLN00ciA?2~_ zm>C+$D`U~LM%*B>2N-8tqnMwa17hjdXdV11soIK_oF2|rH0mxz!x|Y;r(zVv)h|=NMG=(Ju5L~Gb2%dmeZnsw(MEKz6ddEL~T75AgaSG z?B=sfZ1fO&g|wfgZe4aB|Ns8SQ{lnHl}q()i7}^#>MvZ(_HXOS3XW8pY5~ZMCp6BYbpZEYR``8z~RV4Qh#>Lu4T-C z%uDG}K;zlQZem8+U_3iXpk5@?bDizd_M#*GLp)|464cU-{8*r8j;MR1LXVVm94C9v z$9eTUCM{}oHc`aNhtXYCLBr2QmP!fH;C)tU@hGPleSO{y4a-Icq!dQ>R;l+{Z`GpA z`hw$ai}q+lISoWuE6qOR&mGI8?cepKHfBv8r8J~;i;Q`zttBwOh#W#KbYJN8xdBl2 zC7F)fPxxPF6Bal@a9?VH+uK;0juTc8g7%ykf))#P-*bXqG%of;?KwVf%a=?2IYul| z;MsG0Droph<2k8eUL%Xi)O=2wI2&L_+e;tGA6|v`(jEX|+e8`j<_8f;Tr^Qtt=Bcu ziQ=Y+SZ_3PymX3Ip)6*jFm6p0V$a**y_!`rm%2oh8gI@!IcnIT+@5RQIHfMN=UO*V z`5mc0*Lc9qWaKT23Or?w;ob0J{pvJmJ~#E&4~W$r*M4qrY|)_l@A4>u?`B%>6|1Q- z76{d^z{W6A`ijPS>3E8fx0m0ewl841{T-KCr+7YbJx(RI3_-ke{+U-H?L5TX5RqWpJV)%|VI= zG2!QZ;mkq*7dzW-A#*RZ5j&pi>Mt_r4xDo@GJ5SUZS6(YDUIH#zbGVmrT*&6i<~*C zGtynD#*1PMm+8SrGZjCFyW;IXEACGLR6bI+N%9|IqN{{&H(FO-+|r(4F;TKc-2a)9q1!9!R)A4 z`TXPtr04_iS@q4m{pyx##5bLmNGApcQK<(erN_&yZEv$%oXYQK#CP3tj~(c;q|x%@ zQg|u#=yp;o%)UoW9+=F3_6?TN@@ybb+2;KuM=*$%noG@a1i+7T*tqh!@5SDrx~LK5 zNR4;bGI#1;>c(UE`3~bjx7dA;G+vzgfkE5!BHE+$`Fwq0FeR2^&y9IymUzxpqX9j} zd009x4tDD^Uvx@;$D(wML~YJi#B5<+^&F)}t>80HS<}RV@XH}KfGvC@eJ`<&oy4O0 z5+AlPfq=sU=GzMgp_X`_QSl6zHZT*~(=1Yw!i*c_7VonmUHF8HX zxLhsp$WaFskv3jq5vt{-!7j8PmQLvzyg)D;4YA_tq3?`76VgkR=)IJ=ssJa*%oTgK z{be>%-6E5|m(6&@A&4!;y!JBhrdmgn+D%rHdVI6BDYjEteDl)~zgE(CnX!QDBI-zu 
z0?q=R2=`0d3vhRc0(lhha|>Oi9+9z^CH8Yo>}AG&Jq%(e-SLz$o_HMZ!4fVK&Zc5F zT+bOr?`6j1yY7@ZZ;NAM_5Z&-34xxb^hHXoF`_xoW8h^kH+1t) zkw(HW{TTDR87WaQ@~ zvg~2{cov3`J?qhUg?GTO*Ye04OJDO9#tXelU~XY)zruTjHv!SlBefpi7G7b;&3=$Q zN#y7Y)a#@ZDLvug=;jqMNqE^oW}{ULB*Rsw!DYSb8*ffy`sqRLTtdP2XB|J+<07Kr z0DZ5FVMUMO{*IJhKH;3h%Uxry6TC8-J&P@B!;4vH4L87EX`EZKMA{A=e>}hY6SIxb z5<;%V0xyp=qnwLK;eb=OU@EHjoVd*R%8&$BE(@(^<(iFe%G{(lda2qlo_-t>DT()j zpc5|S>EY>u6Qr-^_|&L*9NExGRI4Rf-lRK~nvJ)f)n!%2PBI!%)gl{NMC~SDeDej7 zU>+94c0z#1DG+Ex$a)!j5;_d`Dz%o?wo?m+F2OHb*217%F=%Hvvo#-~aR$5>)l{L~ zFLPCD2h&Dg<(<s|@uDz4Mg4YUa6ciHbz- zA}R!ds9D17YSmt4lr|^eKn2$k8wb?Dv$yLx}wztqY@%H^-Y6yg0f%0E)8nUy;J7|lJeS(Evj#aR*mems$H_XXpfWQ%GMmc5)Tx+^y+fUu7#VYamhCTR$YIf|0grm>J1lxN zfB{%LEGT37-v~Z^lcs)H5{V5Q@?}*@BY_)-8M`7T**Wb`hkr{{suHST%1Cj!5=A+iNM43M>K$g?U4`sNv>E%Z!no(a~_8{E9B&kMDuq4)Z1Y34;&u&Zo% zQD)^zefzl?h{LgGHxt_PVm2U3>bcG5C1KYqVq;Ye9xbuyfPFtNIHh3%9d8Wu{M??e z>wfRbb|4{wvsH&UCIpK<$JTQ!t1M`0F43tsP1s)_nFNrx!k7;F5N+R)P8ShgOuO2V zF}X%Yb%g6j8h1|B=WCC&fgf#gKe=&aum=C1+QZTe#P(h^+Fy^0+toK*yxK)JPWh2G z{v$*SRL$BIoItKByW?eAZ{SA*om^j<)E;J$t+4VuFIY83vlrh~Z~MKLhh;8(!(*dY zO!R$1d3DTK?|7tCt_c}ddKT+tE?Ag9PA&n@Zr+aK>qeQYB^cl4F zt9`Dl;Djr$@;S8)7tNSs81?|SBH?2b_PBIjy~mi_sb-wHIMv- z(u;5Y^q@cMHW#&YgGLPgQK6rXJ6ZaUvN1X6RH+@6=7$BJX;8A%y(IIUP0~>z94nS- ztUx0nX}6^5NRB^I+6f7R!7^(Pdgn(4C^Besr1% z8(6)wqm4y~*kjT-+GggE2Sj->!fEspe>)J_lkiBs{%B?HRq^zp&W?_g0WNX+S!a=I zz~6N?FYb{}G&ZGoCxQ%}o||O$=+IC7(u)i(Ng}Xwi41&MTueR2dV2@~Z^szj!+Q8w zJ0=DcXNX2T>CcJR+^{|?gn^a3rF^;v? 
zSgbU7sqa`LRA1p+B(-Bx@3c<+pMsSd1#~MqHx)V2rk zF0k8Xv6Fi6fv|Ztj(sV)gc}^#O!W{*j>@o>>Z!Ykr^*dRWmdO75;D_>vg~W&doxmc zjJieIks_v6b#UOZUtR={T6<&h&4bc(W6r+5?i$Ff9kb{+;w#nP2b2Zlv$@rFP~RE&|ejutEkc?P>%E7sqxN+AW{=JQ#hhJE(EfljZ6JFZ*DDulO30s z!0!bjWSk;OBYiV(lN)}`3i(b}wf1qL+=rIgN}fjzl^L$sTQtL1e6t8)H5JwR2rO7& zI`XL^VYQ1=pQI~ksfavITu zr{j1l$59v&^v4^$L!dpkWH{Yw7h4KT2ku$kwt&(mOs` z3Zo;75YIU;#7kPTAm%HqwQcLCKEa6SUrM;`3D&>XB7a(7EEOBjm>e&r0Z$z- zrava5N9JY1jg%7V;j|6jDm{meUm$Y{k?MFkF?EP1iXSaRlymeon9vmGvoVF4snR`)wDdo`$`)xl)ICv`_VFUCmO5m z_tj5wF4)6`khC3hvoF43X+<;`9@mphaG97K5kx*-Zw_4i4TJ2YG)35hp@$k#Mh`Z( zS>Jf_>m*;w2lb)ewkIs;j~`}^X~Z=Fj#*QdI)aOnOva8sSvpPvZp>r(`Q|k5I%->Z zN+YA-Nw3PgACqz}rRW;rN+)}F?bj8|#(rG-PENS=LET!U^!ENeQco!?Zw^>fZRSpg zj%%29<7A`n%7xN&h|BKnlZ{#3LgRZ9%$w_DUSjB0DI7ZDXz8RDh|uH5%ucqEQQzq7 z$)TBXD6iCzy`E!2sjrv0sHK;qxcqR6_dt)fINCWS`e-ND{YQS%J8=QOm2J?er^;+13J(ar)KiQdssZP`$g7API5kaNKit)acM9k$d@vmUX}AjTj&Elt8vn+c7mZVxBWwEbp)S6A7`E@?*zJt5Kfb$xj# zcLJEC_|~GdMnT}NIL$h`dbM<_db;IvS9h7dQP%E;%-~&fTpZUEjTX2jI4)ye2I2m) z-s#>?^JGh5QbBFpX zdx>Bc#0i=66y8ev1=ce{JuD^8;|vqk!!@uleO`?h<_zzzKFY(@)iaFlQ%;aZ^&O*E znrArMxH4g(*ZLC{?K6UHS1ujo_uIXInfJs<>w_I*slW(Pt%bMQTB+g0-b(!he&{__ z<1B~`y*}JM8!hV!KQ28dd+AVH=Ay9f%O7S1(Z?O-HI6`coDb@F(r({ttfwt}N43`& z16Z|KOkNWsF|3yfvXsJ=Pb8(MkS~@IX$DdcBlO*7+cBA^Jde_PNJM~PDs6jiC9Wey zRYIR9?o@5r#aqXOe{Wc3qpnJuhK}pgj=c(>O1v)59hrMg@OtIQh{&0)tML5vojG%W z#WoKu-*a|cJ97_D^jfvvIjo=Q!>n)U6Fw0olwX^f|N6T#jWdl^cdXE7)*@dwe8dDX zfgh3fnWnk8uxqAZUKLqmp$po`n4=&(i(t|-O_Gj3NjmVr^BJ% z>=8Dnvytrz(T)hD=cHjD6Dblzt5+9)(pf$_`ZUK5={w6?b{(a`CPZY#H`B}rif)hU z@y+iU`7iXQ!PZcA~1S z+Sy5f)k5&=2=%k03rqFAk|==us_@y{?TxczDx^)0oe6kDC=2wPw4<6*t5&Q12wRlU zZQn*2ixL|A%P}j1!$$pVV`+P7OFEIQ+IXE=$=v=pJA{nqZ`t+Crmop5SUpeSrPtMd zF51F&P(SH8X-e*71JoBO;vU#a)y}atnbT5tP#?_Csxmv#6AjPM$2ywl1PA?1BF6X} zn+4tC8ZLP_ylNCyk5w2n^sR`L-Y~-}d`=QOJMQgTWZF5wb^Y~&*(i?mv3U2QxL&E; z7*{IiyaZCFF)(tjjrTG}_+0O*ZVLDnhI6fR-Qqf_dj?a2TR4(MF=ArSc?~b7WH--s zcahErRf!mtq1W2y#w^tneZ)|Iza~#em+AXVV}Vo^^SBh}+Gx#-2LL;%Is>XVc-f#y 
zd2aA-;i$}daUKz{C&+r9HMA0%<^r$A3vjRHJnx|fR-J1 z4(k1o+WFP=W^JuU30*Dq!kzwZRTkgki3EoAA!(c+V!;o#Is(RX6Oz{U=7@B|+Y&ka zmKVg21sc1z&wMn$+WFRF0?JV0ovQd+1h4!vTGb`Pna}yws}oOe>a)0rzFvO)aTkV0{_kNv7O z)Cr4gFHo~s&0P>eGK}L|E=&We$3i-5xEFe1ObN`W+J!FZ8iPV~u;&cwEX^W^z%7h{ zMx^u@jrTz=v?drs+$n9(>J|(2w^j30%{(1rUQmBC0Jpzm=UG)Y+CbUQW| zq^QvG+Qw_GSznj>JfT<7mP+llA^4m{s|eO#8y#Z2i4c0NciHZrs$ije8eSchud4dB z#vgO6wxh5;7G`8@lHBuKi`2w&)SSQQJ;)-V#*54E0-)-Mr%05w1_*si2|VpG8>MX( zH!)=^6r^HBb51cADLuI*VESU~#;9AbeHVu?we5+J+QmLyIUlNDZ1me(_L;+6QsN-s zVq^31$4T?zG~e-~qjdD7qJ43U;s`&6k$Y2SwlZ%$MfZ2H87(5E>%>FSaikb)Wi+Li zD`qbacdh-tiaH#7(WI_#PZ%bmDJo3cIpUB@k|^?f@aRpAR_ePXM)E8k=E4zHyTnHl zOOc&Zy-IP~Az9Q{rKqR<8(@CJOX%Hx?$tLh2_0}Q9@I;MearRj1j#Q6&3&zmUE-7L zGASJ5MdXg-K_lxdD-)g-x5{kLpzb813lc?4I<`C&MLL~5U`Z~u9;kAFD}9$n{|MmF z{d4V7qmZ&3XkF_1#)4$COO0GT+0$DQ4aY*cow~M0O2f){6vuINiz_jl90%e_pxoG{ zp~f}p($9&Ds^%I!j*OI!EYW+Z?rM#_z0?QCOmvuQ&z*X43^omYo{JLhHM7HHk;{Cz zbdnLPd70x-g{*He>v>J>GVi1r&GpM{5LW5$%{q7+Dyk7k^fZD%f=DTnUDB=^P-k9# zS&Z*xBQh3^*)Og!6?K6sqO|&kx!CO~HnF;(l~La9Vh;asZ_Zv8Qp_9gajqVvv_l%E zrO(~YRW&tMjux! zJ)aeg1t`(57;j%M8sNulnM8Au(&I$j6^%5CW(r#5NMFOnb`5J&tr1=4$~o~7jp*8K z?8&o~MFaw?Yap_{IegRD`eq{}U_{!HZ9AAy(|IE#Sbylx75`YnxN^=yDH^eZ&>MOB z3UJ4p)0@WJ2riw*ZgCDiiy%aw&mca=b9g|H)2dYJa*Y4ic~4mriIMRr_rCaMKLKIG zw_a+mvmWck12EKI=UVm@$^1LCKZ^Elyv}(v$r99z9KBC7CGC_lZ&4mCaHwSO%<3;- zjlJ&AtOGrGU*LkBnWBmbGCWkHS&v$sWPkohy=?f7PhHdQ_%t{57j&Hrk(@xd(OA zbNnZtEOY5Q{&X&mU6}+_13vq%OoPmf$4t_@&b9Q7S*{1r8V6R7Z=~=pvk^c)WPwR_ z26td9yxYOij1-l`+))kpg!Dy<*C7$% z+Ct}*hKp&ujQ%E7eWA%jdzh!7vxR&1@b)7rji|2S9vnU|jKxk}CfH7G&(*QTr;{&? 
zf#{oej)Y8yjY|;0~`>u-KV=1na+Evyl*A%3Fl?m_({1(7fKD+FpgGS&`3E=Ht_Atbk z$2Z+9Zbe1t55%?O8&(46r+NVuGtv>YhSe=I=1nbSD^nyI@DXg2PE}LiMfYR0(rfCq zWZZOD*#Hn_%A<=FIqHiYXBu#J5a{%J4Htl~_AcSu=({?GLx1LqT}McEbx5%O+Favm*JbPR^E9KHilMSVSM|2%bDT9O`PIf!wahVi#ymwi`$go0 zbP>M;)k*D}bh+cIbaGn+%Faej-69h|l4`cOlrUG-)Hi3@-Yhndt1I(}cspTTq zMo%ri$35YF*Us<)EpF%kZ~sZ{+K`Djg|1)g^MN&*P?RiMQd`&V&UDvEBfe>9<;KuI z&8pTzaJC&OgoRm`M@rwOva9AxeA7Uu^e$dAzNvA#LEU6J^t^~83I8diJ%3Bz zbw=M(J(IZ3`Jud(hJ5?HyYsLtTt6#8rUC0b(6AtT0 z>FY)4ysErDVy8m{AFi_zP#={P4if~)>mAqG9Gth?-}xxL(GyRU*+9Z2lI}xh2l0^n zbs@O?ev;~$IY-1RH5?LO@7+@6d$)0o1Geic!G^5b^)cP?=IC)uRN#6SXGf~RcSx2h zbax>&jq8nRoFqvzWpJ84Dedb+gm_+3^2pLyed2nrcS*Bfyp9%hjd)mFU@z10e0HOR z=Oltw4r+|g?Df$*4&U_2nqF0%SJ7D%^y1`P^-WO{bHjv{8$vaNKS|xkeK$C}mlL8{ zyCJAuh61angl;gi8)7LZc+e;ZsRmIL#Xm{d$kQ=Rw z-Qo&DVsA8AT}C|YjcLf&Fn#McI&1tku{~}nnU~;qDOyVi8I2OeaOl3y-p^>pHx2C4 zV;u`r@>&Gv=npXFHwH`3VTm1!;^SSvs;fV&UZjQ@bUpdk^Zicxrsp|KBaQbegIw<4 zBRsP=8vFJ4;CfMbk_`d}&iKxG5ih=}m2ba6fMqK;c@J%miOr}7vwiW6Qg#PYACRua zH(iSO&33!^S-&X_+BzD~5+?fr%_-C$PQshg3Tcl^G`)ge5N6G4-(*Z*gjE++?e;6Z z{M;o zxSoEqb7>N#ODKrr9J)ou;=8VsoVZ469Pil{=|oBo$*^8;4ry7588aJAXxys?u;>r= z^c?Ax=gFM+MBn-37dHJ(&5=cRnBUh7Iymg(oV6J^)oQ9omfmvUCx@HXZKg#dzO%M6 zn9Zhh&i((cj@t1}ZFh}!(ch-bn=$BYV^FMPe^$-pM~e97w{diapvjxT`6CHNtI+am zAgBledSx~d1Cbk~ml|2dxe`y;rbaYc2|Dyjv$VX!>YvyqeJvZ~Rk&1Zk--$ASiNPO z;2jO{WUZM4S^p4l<*quD^wtgMNc_E-p=dZ3yt>k?uxti>J5v0F;tWsMwv2^$V9Hjd zwnKH%N`{%mQ0vKUr)Aw9>gQ@?=`d~?)-P{K6+|z6K8y2o$Gm{w1zePL6Ook=th?#H z>e~}`@T)K9)Z&}Pm9bu{53Ih(S)+P^U>HwfciN%NB4ZSHl%!dqV-tsRK-#3eCy*W!}gRisL@bd%$G$WK0IBBGq>mD z=*`i&iX43bPn|2ANA@Q!=u;IchJ+vX2Z?sSuHU<=Iuv(l6xUc6`=KLjuPnZK2!sFR zd!wq=(al=Fk)**DdfwZ1{QNFEhJNmO>`$3VTI9M|R^i9tCKny3p6sCq&Heg~r?+9rOm{91?dpDz^>N5>x(hD~2_@?T}ZGLR{ z)be?o}zEt zP=`%N%~)Ef{Y{~_C}3 zAi|pQ@#3sugELNKuzB5Q!!rrHycNJ6&qEko?@eUFIr7K*i%98`q*7Hdha^fP+__7j+z|AkW z^COf2s%m@vWAOTD3gYpv($0H$Z!WjXg5b~h23T_O!!bIaBvRn)-s@8{OF;L@`~Y(Y z-__h(b_L0yp!xC-YUjJS&_)pdrF<8LWpQ`v?9BtLj^*E8pHt#|tW;njK!JFyXH9Xx 
zCByh;zH>`JR+O{Hw)gs4?DqIM*2sPQDx&iYmIq8VY`Bq=PZ*Jxtk!T4&?6HYwh=?o z>8gO1KJJE(dx4y4lcg1WT&>y2x!Wr~4R?X88+y1v)}`v*MdW&6ZP$rL$a}#--)p3( z@s8v)M(O7wHd*2@!uFtE8AzJ)4wfY8R7Z zovh#i3TIA2(mji!fw++58 z2PxCQ;r9h(ng!gL@=58SPuf^1c)uc>mWTc1oSZDp?V7b_{Ek!@?{g%5*MqhcZ=Fnb zvQ_w5pzpmeF~b!p=x^UwHb{i4*R^gvYjCI{>f6i`+~tu4NK1%vs6bHPXZENsj#4=d zK3WzPbr>k>#@P9;q!QC!sO399-f2=f1jiorIhs{Cs`{fz(iB=lF~lb0W3vfbHme4) zY}DprRP9IOVEY?edbG@_;?Q>jwpX*xy(sd~TWsagL;(DTAoc56Lzh!@xlyA59o^0k zM1GZ@^uB1ASkJF4*y<%&tYfbQBTJg^iNXtj_vcwIV5pn{G5Y)PiAf59BP87 z&0%%e+WUOu@^wnjrKp9qx^?y?+-PzU67{kX1suXJxHR&*nyp8jk=OjKZB~VjA?V&N zu;F*xFxy|Y#W~pq?e{5sH4d`-9p+iolC}N05F}A{W$jpb!5Qd=KK1>vZ!2>?L``L5VS1i*JnMfX3I}lTWD+m}rb|5vtm{^Af zxSrq9y<(6wd7!A&Noo>aM$IgsQ{b$%vkO-kK~t1!Z>j3`Eb;R~q}DlbxBLVJ0pQ9& z1)J3hpc|Z`D+iKhJce7vdVG?CljCmlK$4cXa+iqTC|bA{&y{^}MDYybNuqga@#f0> z$ec6;HnlmQA4FSYbH4l@1Zyt)y-Zn2UvbJvdk^kMl0;96MNcOpha~9#3*Xhsm4F*2EYpgRt)5%`6Yo3ta7k zK{O%sOYuP_Ch#$xswy7PekO8Rtg60*uzV#;#0eQ!-t~jI^X54GHxK5bu(RbkxLrDs zp2}?A7aSqOYi)4e#~`baeKR&ZU*;uDZ=`gpMF|Ly^6-t7@-pYK`h50-EZCD6m9I0? 
zXsw&`3DRLeXlK)*!WJiu4SoUS-P{$Tu)+ee*-M|y8zRnplKtqcO9C`Ka^s}%4k9&PYY!gL5x_HY)E=w6Z4vqAlkPueR_ z-+tOA55>{&h&2x-wIHA(OhrCPGKTyzpTrcgjiX&$pZ8#0AB`Q9qHiUB!?C}~)}spC zw08Pt)!cKiJKF(^qNm$JwM^)gf^rTnIDE5O?%{>pxzmV0{Tfxv8lpj6c=C&gD#!9& zSkAbp+Ug65(lBipjAaQK-6&PC99f&u&nq3I^zHJU@#@Yg!Ol{ACsh`hyx{XR`UdM| zyHcByIW6Abhm0%VA1CbyiQ5-)adx2<*)D_N2qQia!?MQ<>Dz5QEAm+{gWB!9QL)4eLydM%0_J^n@6GseCydf4c~s6 zdKf9#X23kVm$PR(eox7j!CGaUm!T7bEpHw3IvZ{k7_lT`7354>s|xp!Y>u@OiT1$y z)LXf0Iq4^}6~f6hvlZPiV53^)-X823?Vy(lF{}` z1p=0jw%)q^zy7;wb=%BV_CY|mqXVfah+b>j92}E^a;%NFiBS#y5vik{kDS)v#>1)iBc9_xp(WA4P^(Yfl`g!I49r?<;wzEh_%w)EpS8>)I(uZa0b9}wW_5(64rp5@ArlDq#N4> z3JxWVnLiN2*Qt0h5pnPX1@~nVG@KSTVE zz!Y`}+Izy9A8=SWZr)(+=n3xYpyJVfAhx>aTJ01d(Do4@Au7!tgbJ^eE>kD16qD_?3FCvYKg5M(+=`<dg}TkO1z<-1Tao_bln81-oI&(p?ZG8%MG z!{!ls)_z1z;ZWv+#rOCL%em53us)Y(Z0?~|9`S8ERfIWvhh<{ZCF6t_M zFRM28Z;sv%zQyYQmVnFfoLQZWFK@cfn(^zQ8Ecm^@1d?#t^ejzjiWOhXZ)VtD{Hds zSCTWiRVYruN`V>1m@_M%IMsD^EKeSlbGs_(@s-;#29Ioz;~FUMFaSN~dk=kUJD2_b zJvP)XH0?ALYPjuVtleuPS^3xK zv6+{m;v9x)gmzqR7skOub9UN}q};~#JYn6^5N?Xuw~pkK)^8WV0zs)}32|#H*%6`* z8IJ+sWm~K2TtZ5=OieIB|wDyN%#|F`H!Hu6*+fG!Y$Qr7ab?* z__j_w6(r(5n|vzg0;4;&WFx9-XG^pn#h(0EYh{x%qC0NwY*KCqy4j2WTa0(2vmBto za4$U->n^@Z${)p{iv7G?0VRqV*S${kfsR>mAG+Bif+sMbSHA652)6!Hj&5$6mc@jn z7i_Ie!#*Mwtw$+P)HbS_oaK~a**44Ey?LK)-8Gu9=OlYUd}kh-wiB~3{-D{@v7kPQ zj7Kunfv4}t%-n2t@aez-oq(+H(>~L0c%u#Hdmx4B8Hx9M5d=P+%lMv0Y&1Ivy9N(_ zXk%GVW;Um7ymaTu%E5_xR-swvv}K?%5sfRD%+o$oN*UC6I#Hxa9KdEgfd4Q!t*VDB z9L?>h{CDrQ#cW!v_gQF_drB8*|I^>tax^}NZDgkm+QX%GqlqU8@?5~>k}qkA^`p)d z;;@wh&_e*aRyA-{wn)(c3Yd-3-{TluR?1zcv(jG-e8g8~YSsonl6!#gAb6q=eWYw#*)xN?R2=?DA<;eb2_yN0i3TiC zEuM%ypa?Ld<(pq|$MQ+$D$m<^J`smix5)s8yTa>j;v;bq6WV7oUQa!W=(3R&&^^n4 z{Uf=84?t&IrDb>sRH`3ILV}YOsWojep50A)7Q61HD81UjvTE9vv#wG`z{$jTS35)> zLLK2Dow@#+p?lr=NS@ueC@6m=0``xV<4>d|lnnzPjYWb;p%ohZ zXsk7gE*tu2uJ8jMe|`9)KHE?J>j!Kk+mU7(!!}greXC_Vnx6AE8trfb<6D!ZbpsDe zp}vhrX)QnaUH@p_pj|ZNvpwRy*;2lf13({~mG$Ow)zvkQfliEhL8P_xx&U8cy#gLWMe9c?Ibg5E zdn}!KSo^4NmXTN6dbTQZ1WS}pVr0(SWE% 
zN!)Z@PMaS~OnyXRcIR#2W8U+JK4gO*%Xxg?0xWO5ZsB78v9dKUVoWmfvA_$tzJ|v- z!ME~VM8Lw<5ry99#~eF$VPU9t^~1# zbPVRZI0Ym7{7_X6wV1}Ci%!1tY#e?qx^5#McWjWxg)|klk0;8!1Kb|H(*$O2nrGbA zt9MdON34MH(kyp1IkPDeNj(~3iV@jO!~^yOuxyiATI~tp#0xb(o;W-o?w`%li(J!^ z18RLdkNwxa&e{dc!wCNttL_Ov>`caUwv_L5wd^_TeBG8mo(nyN+0$O@lz}%J=pVcB z1UyG|4ga{~bm4ze}lb4%k+A4*}ROcRNi;KSA>`Bac%lHn-k8imYo!S>y z*v?{*5F9MJi;lo#r;(^QAFv(WWBtWk{_wzFDNXLy0@75TW1sK|!aJ1) z$Kw;(E_9u&M^6&xO;qjPf_k{X)+RsUsQAx+YmHC%(qO)U*<}t$Nu{n^D_Td|!SuJY z5iy>U!pH`GG5UD&Rj;z80_XGr8A;i4vCf9*G|+6!+Indjjo_4Rl&3us7;Kh?FqcJ34qCrtTiKBE72LAzfbo@Q zxsmLAA`xo8OJD67U#GW4 z?LCVeDjMRd$kdKj1y0?TE<=akSoAu(hYd6SjIR~G1@(CQ%F~dwi2!jwa=pm{)_&b| zqKpR=Z9a47dgi9ct*R>-*a;P$vi389#k#T`wZ&(0B6j0nfm$wlnZk8>xvEQw2*z;+ zIZP)&o8D#JQjc6LkeFW8=oN0J{j4fA%2BtdMZr;khRoSo76=Y5E;8#m*b{8djcA=D zGoB`9v($YTAZ``7++w-=@=>XWqArfJ?Ad2>U#9tleX{J!@VvnF+6F%9>pji+Kln*s z?>$qP2@QQRrv@@j%Ail?>c5sC8Y9t)9H3>?>Q~8HDex#K!ssV`)d+t!_Q{;mxfz+> zjej!X!9<$jSUo<^?OSk7^h7-F&+u=P2?D|~aK9075=zXP1vbpKk60^ut8tv>@PvHw zsPC~%ivbu0hAhoeRMcsM%wst!zG2ilQHYcoU{SYB5`@mx@30HKd?#6oY#w^dNk1nJ zxx`9#;#08}&EALIL3Ahg1_<{?yx$EUReC_gaSc>dLW8YnO#z6vqu~5wwi64&NzKLQ zQ#p5pRnoaX@TsyuM2ceZ#ZLCn4|*aKKXWMRaYoeOlAm+pK?^)ubcPMrySkxyVsU0h?S30oCmkuC`rnB zxT69uW{>BrSK4)5Wmv!V^PL+|I!H%sr2+;4%37HKO#HD~twwMi&p+42<_I>*WK6eY zf4W)Pknj6eSxO0{vCP~qZF>HPo$^G&DVF`yzCCsI82EHq4|z36tjxMs?;_9b4Wtx9 zpY|12P+@Egvp}Tv3enZbryZ3fLs!-pt9`mKHVfglF`AVm8kdi&_mWJrIb>|nxFQ?TkSl2&{|moCIRNH9bXWQ z+-Hl`&=9w9(^ssXr2@nQAlPyYLl45H^ovGoe7du^k1LLjhd+pJo85mLGpzs4Ng{3`H2iZ8!NwSpA9|h)BUgSwc%%TsZd?IwcNNK)l8QySrq9t_V&sMW8$xSN;F*RfDEPRr#cXC=qu+Xxv-cfSe zXL8M;x{upHc_M-oaX~&4#O!r8^qIgBaYgcEL$zbWQ5FE7=MFs*W#9F%)jpF`O-gxo z#%LCUkrqQ$J@sG&$3Np}HMM}TR@9%JwTXDC3`>)rNpwMAZjI94%a=HUn+Z|z*i!th zR+b>CHt@ASQz-f!Q>I`K7pu`+Mc$_6Be74_-c{+T8DZa+vpxBe5XQY~T{gSj&p0~H z%KEw&zuD>Y)-R(cMk&N;r93w02<&ESpUEx5p}$-C64w(*u19V^jBARM?&G$R0EzBl zs^#Lu%?e0mN`b_K?OUY>?AfNUsS7As_bKwu?qmgB7r6Ah=ejNVq|d?g48+gX&fnYM zQX%MW|Lv9vLrdjYKL;rvUh)w`4_{{^`31K^X1bP7AS_#LG@oR&imtzO@gHnF8X%Yj 
zabv2cUx1WOEIFd>!k%Txk!Ln@m^2b9C;JRo{!PsaQp~^o-3@EyyRZrfd2M?skw?PU z#nKejjb@Kz%ycgwNoYRlrpa_0^QGs}0b1-XbF^V^i z+Is1F53e`k^@H!W&1|1k6ezH^s@|lv+pb0`{1cTpYdcGMH;qshCu zJDLHHYQbr?$6DD1?jM{B?E(M+VPyugSl}mCn=O?GWYe}>Km+}bTPL2Bn38qu7C7Xz zrF)my>8c+rtgK%qa=xjcV=Gxf%qteAZvM4ay(7pB)Wb6x$6!cV+q8{n5LRGH=F6Y; zHPn}kCb1Q3xG;}Ld+CkiQ&*%iC2z*S=5w)x@?{}2DN-G@@}G+%aEU!P_&MJ)4|4(j zTvBjsQZXosCqLVJHuAaL16+=*mY=xIb9#(^E|H6B2!xpNvGSdWqo-{Ab51`0>##vq zFO4cT#zerh+WEV`v&n=x%mISdHu9Zpvxzd!lq0-~I(`#rWv7ZXgBxay>dAkx#XI25 zqak4NmP$)-Uy8*d3;X4$jk>l0bKWU)CRr1xCv9xEY9D%Jy?9y(KdOFy1Je<&uH=&p zcTU;b=X|H>3b`IYz3Fwfkqgk}B-<>Vi6dp3AnLcRXabH&^1bacghO-rW+%H)vd8Dl zK3@*iFpTVR3=QNvlH+-5Zv~tf{Jbx9?Tt3{`Pjo!6hV+b?;sxJ^c;x~gt}KWIRm-#NnsU3Dey=N*3zxejSEd)KGy)`(A2s4SDj@UgSx@>j#o4*Soo}PSgszJP{k+pS_xP9S zZY#d7b`EQAZU>aR@B!Qj;IUZ8iunr(F#)<*@_{ep`3__12^$B$;1h@qA1A@k7YbRH zf=ph@kmV@wxEo%MWC6wR4?Y0j!7l_GBeDBcwza#sxEaT2mX>H0FJs-0Z0rj@v9r=4 zkAESF_Wmhu;;>j%dDPoHC7?;r-}+vn3t3 z#hRj>J&V#TmYzL5BRq683-njfV=w2E%q6Z_C!chwI%(YkqnvbEUR}+5Spo$#XKp|G zA@>IbQZC~}sSAo!Ave=*g%%{c^E9)GwV&A$GPHc=`| zyyiT?WPW>tHNIE~jnqT#8EwrkW`+qe{7mngYlacV7af~-Pd8l#*3NpcOk8u~3|h>0 zQl28#05Foq#2{u7j@xp9L*B>a1!7a@Amoz@J&q?B4IM^ z(+Y{a7Qb@S~I+9P@MrmehX2v#44<5#Ks}$sBhe>C4A)a6U54yPR#611^Ys|jn zi?Q1I;0JBsOG#=$OEOA*DQ2*2HCaLoRqxnH+D_25;jASU9JW*WbP~k`@iF5`B7y^t z+vt}H4iOZFi@;d6MtB|^7l|>29pHG?+7hT2LE5P_fmY1Z8TtQ9{Ccl)#Xun6A zX?!Uthks-*iGGBSX)_y!l7oY-s+;HOv|SoONJ+-<#j2kWrGo{wl@>#se=$60a zd*D5&hqJkpj?~>RA%b>M*YvlZc$^6Mfcu$4an89!WZn>a7=Wi3mGE2%Oj zT(tFAuXjq@v=P72!7SKjK)^*`=ikdxR{fLh%z8P0E^cF96a+V}fcdTJS8R2LmS`h>HgiTi6oPj+GjZ`Cwb7*4AW6dlP zn^M*_$E;O`Dmvee;Ki(mTwPtrhS9Oj*8M+Z+Y3Dn$mnkp<9QD&dav>zkz#YA!^A4;%i9FO6awA)z0M(sNjuS*?_oSs^g$x;gq4$Cll&^j}G28~LX(8~;in zI5NZ3C7}qD>e-0461hkH?le&vjyw!LW0R%1>DF#*WPva>*Hz&;YX)Tdr}xZSE1;<1 zW%QNY^%08}aZ3!x`6W_>S5H@HVz~_UprT21qCwJje z8c$Okg-p>}0_wMc+=c7aJ7um0c&PNoR~*k4q?XRlAqovW`c_V#bDgYJnqcV@dW zVk7Z!wez24$y)oWV+;H6jE#P^5CAtO0|Rl9`)Xkx@Sup7;dfg1tB$<;NoZ7h#s!C-`!VW2dy;8fi9!#*$JP=aNo(X3qWNAg 
zbD|)$%#uP2Zj^T}_1DdWFg0Z?K6r&w=-O7iTkYUa0}Dzp56p(5H{xdRWQ|af>n{$C zU&|F>;c%gVQ9)Ck?~*l!N{}e)Ye~8HcskUfuf=*})TQ z!bmNFbxCOm4_n;=zLvX7bGeUe&-m9I#}p@*q!{tQ6rux~sKCM~*hveq%?kKZut<=M znQS9r)2VXRnpr~r7h(}5?QMN6_6k_s{+cgX?M=2=07EN|+{<_Z(!K_BU?u*PCA%yvQo(hmxPOP}Ge?5=nv_xlZqo)NC zG8@k-iVClI@se=Wt9P>52nWxJ=w|iPtIybE{_fm+pEbUoh%!Ah!y%7YroiAfo}r~} zl|u3>K(J=*9G|53*K9GX;EU^;9uMJ>?>y)cBthBVm&;Hh@~G~4Uysu#4#|)rIa6)j zn4s5__bOWwaTU+shl%Ka-7yVYWm_p7;cXkf8Yn>%Jnettie(A6NCK3Tvj2?|tDOfI zh#9b1E!PG1=~e<2d~|r&y6)b#vm8QiOiOo4yYiHayOP;A@=QJ?Cy#+|#CnEDq^Lg* zt);|%w{*x7kwHr(*!JH@njrb_K^ysok9GDSel*{3teJkyM!(@$139SKShi8-Pl zj{PdmZ@ZpU~fy4Sq9c1iiZiy+hwj z3bA_XHf|c(1m`?P)c)5-zUfmjg--CW)xLT6y*W4o z;{_7DAY8gpj|$k69I}Z5)qWy{!|hLgGjMI!L2Hz@7-_NnZI-sM6Wc8bXDiARn917Z zn@ge)W-Z3I2A+{i)nHG;i7dy!VBer$oicJMoYwd#ur376_EnKEx>Po4I^t zv1}!KP`H6(wpPKe)Fk3U@#`q?DVMG&=SCLNc_y1popGDli~^EewXFi^gOUK`CyB2v z+fG*27EmTt1q`-t`I5S8;fxJ@E3iUVy;nGR&LZ#_{Fd)uCT{3kcdsX1yC(NCeBrqb~^lWhmBIKIyo*ZsXrdRP$uAoc7V{0}t3l!sJw) z$(H%qZ`~gGcW$Tu^?h}DT-*v1evR%{@*RGS zIF{;^!DAp=S?wqcwXK&{gtpunHp=M9+v1Xz&48HLom<&G+$~(jcB}>wai?sjG})Zx z{%@Vwk@yJNKqpTCM9H$rS02j2POcBM=}-eEzvEc(_y5U;v$Sv~%wKNVNWK&OIi5U0 zE#JxRQ=to=7afRR56%(Vd@O$No@OG)vn+A4xZn{02|ZrDYE!wOTvcKdojkc_6bx!I zU?SdEqm!o*X7ijz%_v5vIx1Torx$ZGGuF-zIxlb7VkrhoK$_A{;+O)C&!TNwj^^L; zpmoYnV3ftG%a5sBx;%`jG?%*QWR?gm{iyfGH`q#cplP{kYn|LQ4*=qN=?#wQ&@{f> z8(H=wfiTe7bZmpI$c1*KPTkmEM;+}#^L1gvpAs6}h4jk*885Z6ZF;D#!d}0t~%k-KBp!Z;fb7F{#mqH=pyxBSe<9s?kZ{tGJ7 z5(DO)1F?}TBg!S_;TZT*JL33q|J!V|boelGg|RHEhGZO|_D!J2ptBF`Y387<{Ug)kqvAjvXnAyatt30EbxP(_Hh$C*;7zZ!dVUS69zl>M0{9a3J@Hg0OUMZ ztIcWvJ+_3h{~WQcSRV|2_JX?GF&hVGTeg#s#8L?oH}QfD*sRaX zIho-Z{hoBrhK}W$zMfQ@@tYHuh0mH@G!TRuI+j?oN0BMA8M?jwG}Pxjn|JJ3nHtGP zJRm0Hl3IHFm@k5drP0{yrKz`Ks5KG2a4(d_^R3Ts5^`NwdRZI;s5g5I=N^+ebjYajO| zIehrAEyj%CID*&1QdR0axV@Y`h?i8XFvSz>l+o=+KQ}8|$ZiD((i7k~23hS@?;?kp z?!x`!dB6~;iY>QNig&#!mQtxaT!Ce!^EoYVCRf zV$Za^O(zNvgg^HfKa)}E^+@O{;CrA-vvdbpkOSx$?fMGfqwd|KC2W`W@pqe^vBeXP zVJIlksa1VRluhT$0ha8aI@uJuRF|B~<-8kBDH*y-_fGikn({Ce{b(1X4Z;!?&|KLG 
zRprE6OW>tuxoqnv%C2g{o6~}AL}N;}MSR+9Kw4E!0?!t=6$SP>6SN(@5%B@T$1;j} zR|vwyoh-|SeF>NTry=qKCo6O$PO=T2%%zqXO!AMR@_iriRfzn0(pT#BM4&oZSO)nJ z*Ge&+i^$sJ<7($0iGE{aCzJC-jLn_V#!m*SNX1OF50vBzlB&pV^^?8`|PSpOV*6{_xzK!vR1KwAkl}l^PRGJd$?NAbusI695V|`CmrqP6oD#0 zz(o!cpM?<7LM^)4j6$j_rhzC(FG@(AhQ^~`28yqu4zHCm1fu##wBAF@vKH-Qs>403 z%-a1p&mFXl3eI$~V54lFbXUwyj?VLovuR23moDX-R87V(tVI__xM_)I0 z`|f>nSUGon70Glr*v(xV<{~IJ`#IG^-CQ(sqKQKpjuw0r1Y?LNMYralt#y6b7&K8* zy5%%c9w@SnAwe0-cigbKfW!@ZJlj)vN&?koiNc`}D%K_n%*6D<*{t->f}OHPRUTPl zvJTCB=d37nM*L(e-$k6XkS?^Vmil1sV%L}cp?AK`ma?#W)j;SK)oZz%8$u3+5{GoE zLJBPwOiMRg6ul$UT0S8)z!0wAEgKA>DWP3s>fd;;t=$D~?ByG_9#yfi5gpmc#i7}Z zpm4;w7;fm%%KZmzt6PLb9zh?$#C8m58mawG1*-&jx;MRGb}H93Zag+{s;t-9%93XA zRP60jxNKdsp;I{p`=@1PfXCS>$4Qt63E#6UHa&18wNpNuvyXB|uNw9+_1q5#x|)SC z^<`gI0 z(#swfL-fDu_cFV^eJadHGE3CmLOEMJ<)}Qn9~bVW_>Fu6c}%ul)Yx~jPIU@&ho{|5 zpc{QTQ}&GYs-ABlS?LG3GMgvHP7c*Qg25yN3$~W;q?IW8t@Ug|=O^C6r+n{@-fNqu zV(Fh*{Y3m{Tc;A&X55@_I~xlt%?MPzQ|5P77Q1pEA{Y7TLuF z;OW2z5zt#UR6cug$lUO0$A@N9c3>l=F#>f>!}Zlp2S#wjj-C!wi%lL|XRH)&qTWTh zHhwyXpvVER|53#ixgJZOd7H>)N)*8}NR}t_of3Gw*+K@V~Zn+K~eLr@ye}`2BX>Ksp6@DMUCgx@Dda&ft9-B|*d! 
z){ieT(1d?l$vza4(Jva}0}6m#?_OX8N~g1)EgV6rypi8!<*tYeikG|@leK`xMD${- zfU|$<;G>Y@?E((0#Sz=7I=I1Mq-!-fy3hE^5gG(V@W7dzdMuk^RtC?+qU$nv!iLVo z+CfBIb}U1s;WIveZ@_aYzsiK@mh)VM=gfDhvt+0**yx#@$afHNDQX`=09&anF{cJWED`3#{GQ8rG>GB}XU7aUG3FH@ZFS_E5b5^WjIVA44T- z?gd-PYR+sh^~X5f*0M8QGs)#u0e6es1?W22XB$-srrb8m>O8^Cb+2tzgT-!lyOg~B zG}?`w@&tEIj@q+vHcJ68aMl;>HGg4)@pj+b6u#tV%L#BqXX$NXq}cFTUwmbW6^%ce zm?7=~C4BYhMz*8n5i<-fTw_sH7dMO%#DEMfoIw*BZCvQt@5A*_S>cPg!(#Sik(sbQegIcP6~E_ zHt@W|sLTL^&nKP7I$b)3q35eT&75mOFWd0*#S&vy-s7@99m!&1LzJTMI!AJ?H09jf zH5+}tV2CK8jTH#?(Cv6B%a(PT{=8%1>)&J(*|Nuv@dF!Wd#PHdokl5ufUekMH)BBZ z18qf1Z+w%rOaFVOp~beC^&+-<+S3;>(Aa6xm!D4x;rB~a1lGx^;_lRaK8W8EI;kb= zmF8A4AqQb)M&6IYN*T6Neb)UV^rUO~E`ok&>M*m&cMt&N`C&}8ksTdJWk&emc$z(Q zNDyvS?aCP{{N%Q)@-i~t$r57iSV2S}$IZ_9f-9ivz`1g;N8pRWbL9YzoU`X-a395S zO}624PAVQyXzY>ry<2e9&IMl;<{oZA!*$)rQ)k;DR%(d)f6j8+@x5~WCA0QqiK#TEI1xF+1bTBTWu4%s)H z{nkF0lxvS%xE8ZSupANup0cIrWS0W-EywRyK8uZNr_9z&7!Gw~W?ly@z4D1$9XEOF z$KV#+>UO0XDJEKLF#&%&V(Wi0YoSmMCT}C>mb8YpnE<+>1j8cpn{Abmhlkj9RU{mo z#F6YI++B`JJMSX~gSA)#@x*K>ul3;hSn;$kmTjyJlq&nHGb(7@1?jeb5?8J)C^pO2&FAkmxZ=ksET*i9joP9j1Mj&Gt= zIlu;=j4IqC(W915VE{-Jnoy#raUMzPh1U6U*&$0C(YFYnSe1$(cnUAHSj~r&^-D1_ zxiH8aa{0U?=mX={DH8=M9kXscaix*dX0O0FgG{7f3IYi#w`gGDQCrLRvK~Hd>($dO z)?_2bbx2Nfn+anV`+pHJF%4VibK}56<1qkHFFo#;bW2|c zr*rwUy2z>sHCZ66pVdVK$t2ZtGbEiPonBDJ*rtCtTOLCni=H7)u&u_*uAzvD--d%H> zJ1JF$@|}kQPx;R91)r@c82^zAF__m`Endz_j+1qigY7yvh_jlFMep3kB<&i%;8VB@ z;l9=5_iE>@`)nd%l)M~6*$N&Zhf9pD5gqO&ruc>2(P`rj6Bo6ShlyitUvRXZA{IZy z{6bmdZ2U0P+M~zL1FCB%~L|dDKWH7AOi}xOM&iZATM3j;^M>*tI z$^ba0F4|ho&TDNwDk^>`26I(Kh#M8*3T!i{52k7B!fpE>a8dbh$Qb_r^v<>m%1l22 zDcp&196^nQE{>K7f+aU+?tiHniu zUe+ErX#jt2{)VVPsiP+^`nEz^XpI07b?gDcT{p8A*RLf>YUR6NtYn*Z+}ag1+!7rj zkJ)0hG>iQe7p$cmIuw8tez}x~Yz8^MX`KKV;cvsbWh9({){A-KitIGCAHW_Yxb4ct z+*`>;tX<5FIbUM8truY3isU5M#7nl3@7x2JDVEXiW_13~e}(VNR_Tuo3WL2}L9W<= zJ7s=2{in=c@JaS4ZUZme&2R?qrb#BF)eE=(C)w6@oK!;u<{Q|>4ZVs{8HLI;_T`1Wd@q)Z&km6FoL`hwcLtV-xgu9Rrnj#$xT`IKT5(1CmctUqO&5<#3DNg|mZ;%4ivZxqv 
z1W=cJdrsqnJC^OboL{6sMn#u$@#aYhRF8*!ep{TlRQTBl=ZV1xb;!?Uli7}RawI<2 zrQ8~23>dpf6w!>Puqor1YTW^MHz!;*6dJI_Y)qU7CYoHAj@wdHo<5*6aJdvfKzt6Y zK~QX+(gb~Xt$V53A}&M4SdrtZ7Xw#(`F;*1sSfu9+2&v6R=-jWMcVja4cB66nD|q+ ze(CoA;k)m@cZAtSnZ!E0)n-&*?fl)}+t#IITDj3dn~+;&e{o56hf&)p6_EJYWyk8# z*V@44vX3Rp@e8yK<~vCwlmzJvQnjH{4z5L+W3}eP`A*JS#~BsT#cbqqfnYts36=Z;?=YK)RyS-hOstsFd@1=(THNX^SBTP{Fg z`$6&>cTVd>SNFf!x}|xkv!txP>^tHSax3<$iipa9o|D#A@}0~cxVf(?Wx`rC_Ru?Q zJ-^CSL#lnn_u43}xUeM5B{rkR|2bk?mwktEPJ_(b)zDt#B-<%#LR=UBY%e-!v$zQl zyjX|=halD#gD(a?M1-$!zZ!b6Om2@;eK?;mq||yxQ&W4UtoEX>>T2iyciL$Dma4+W zqR1ig^^KR-B`;uDvPrJL=zvQMjDvSP%)Vcvn5;_6zeZ}OtJa9d7T9^s7afUPxOTpn zYlqJ~v8LJqAt`(2B(f?`q}stBpo|yy5UFIA%h$(cz1+FeqdWQc;TyUE5|-iIa(mSn zkxWMaN-J4aBPE|$3y8~FPLos-Tw+VN3IRxT$-5DZx~#P{N*jzK9JSXGK@a5GZ|bB^ozEH519J?rYq@BxzOVgX{FSw%5&6Zk&$AJ!J)Q#@Z$MC&qLDeS(zYDK1WA11 zI%QsPuz$(AWey1Cget6;ZJpvaBSRXJcUwOyV5}oS)=H_c%k3T4%A_G3R^Sd>FGFH# z+-eI<+NiozUe3)@0x_+)WdwP)RlOrjGSe#A+;+YT^Z&SQ?ZmiNI}g3n?5a-``H%ZH za5V_~Jh6SQ24;wCK4(K$12eQ<@^z4L)o>JE*w32hXBm;|rgo0&MlC+y578XGni%4T zx{XDTo*{kwYMvkRX2hwkUbS=zX^mX+dNz@u=!hq$@zrvNJgvA+lAKabQ3V=VHw0)A zJGYtdq`{GGtz>+ytBE_~Tn5`$lUG1NLCk*+-o-m0NFVD|Y3Zut%zyYNTMppmb7P$t z;v9!mw^Y(mD++-at(S10;4+HmS3Yqy1wB^^42Q61Sj$fe{)fu3dOCx4cq95FWL3j9 zOQoqj@3XC{7%Yu#M=>G!wvo>+f(Q$4c%H?dwBXQ&{y%8Pj(u{DJZC;)aV@FBftx^ZqycgUx zv2X2YavzSUi`SA+=?<*m#7hCVjyzIC#29fnnLR+#7KAnUn60j?~@r(|R zV#lq2&GBUq_xI@V$tT}yYgtWUm9Qo*(=s_Q3TVv*O>%GGZ_a0XBDJC&X86=z1>TxYb5;6rNR ?bfJc}Ghk}Crl`5e`=BbiwB zZK5>(4mt8h<9|=KyGAsGqQTkFW>&(N0m(|Mj0QeQ_vGtLQaejPebJ{a#uKsqWDB_z zqw(Co%N0oE_4EZeX`Ps-`h(WZuMu8XszLH(XUme4AGLn8g%3XIqhCraoIT{4fVHZ& zTgkhmVEwNCE-5!=o#Stw}2PBpX_IsENLE*ms>!3N@q zV+VDE4c^G`6V1qgnOb%;mMeH@myFp!zEe06_cMly4BiZaj2uKlmz$0W5C<1E z4c`o+T;xkVY$n9#atptiyZfC)JgD8w0m;e`R2eOOD|0nlTuQgGn@LF(5n$S*{*Pxd zF^p0+Dy~~S0N{k4mSk@tx{>!Xc`|R7nLz@rKv9kIVjg>CVjozu)ZXPu_*ywOISqHM zeKXkBXE~1-OF{0Kkf){iL@pJ$_NYX7Os@9U$r;cU0r`QOG>IoXYu$t%Coatj@*1+F zmk}XFCZe6Jm8@TNMNF9ewsO-q7glp?rK6^DFt3+6!*!ghOa}SfZU#5QL~K@{72h;O 
z%v)s*_7K*2JIAWr3}_qkN$$>sU%FM+_$EBkz^&Zv@G0;>gST=eACP~@Er)Jy&W5uN zqv0Fal2NgnQJX;Vm+qZgw2xzei zzLofc^i5&cCrjTXDm`kAY*~XA*1iOBZC3CcWkjtQ0&%Z7RonULIC%li*kVgPWC+qSDRWRa-^m4BhC7l>_HMNHaLZdsWReB0-E zmgEVpVfl9KbR8ux+R(Rsc^mNLRaI<-sVa~Dk$mT=sF<%?x`Kb(2SMy68;zgSoYiCB zc4T^7&Sv9T%Oz*jlCGs*dgUM`183IAp- z?TW~wK!js%e><@XUu|6;7pvw)Xd+PcQZ*C>Tn&HlyV@R*vmNx zc8WCCiU=rQ8G<)tX<5L^IYF~oFOYDg5MrD_g$f8L!Jd=w-(} z()My)e>rgjojmz!QLrTBZ?|^U--~#pisfMQzwF2?)7hoyM%M!J*2e=~?wztu6ykPk z-7=aq>Lf)Zv#jjVz5L?jCO9CCmdj~B=18e%IYCxlPSjSGdJl%X77&s)eEsD-c6Ik8 zp?w0Z^Gx!wbEvmAOJk27;I6w>8q=A9@E0Xp%msT2m3uCZtg+9Sy^@U|ZnzKVD?Y35 z_OSGWulVxLb6I;Ow-SljreWEKUnz7dLI=uzY~&T6Z$iSs*S!*mgp3JNGn!Ax^B{y{ zQCCz8RxG?F$Fqc{scZFYT?`}U*RW2U^ie_r79TPlD!-BeI!f^23jl2B8kQ`+D*dda$3 zUL~cCgdeb8nc(S`1V8)yQgfZpf@6OZen40fKup-$^Vg-?bay>)2{C8E-)VpWivLahi4# zW%bqE4pV#5(SM?Vg53lX^vSH`>|)aGMhvEfC4RGfW==)$h%0QZRYz^2z#=qi-vJ7m z&4~!dwphK3#Dbc_r2zDmXIYx%@*S#=jL+lTcH;XJ2k;p0=Jzg`AsrxTIjvVs&kZK4 zznaK;htXiEq;Xiua&qTAV{2JXQ+5XE0v?rHuO<#x(d^F^)wH8K!)D9@41sNBlTZi- zw;hEeI($0o?_>!%Q`|DUovRDCtOzluC8WRYNHGQbciUI&o_p^3GaI^{tCN9>pa-`d z3$)kJJdWIUtl}K9TKO>Dn%-@r*@cuar&%?nr5!7c$;HMKmyAap#;r4^o=?bGIEN>0 zJ0bel{AZID&=41pM|Loc=>Ex9{gpLu`#TPV_drXOCh5WGiuv4vVs0jztC{U_;e6z8=0= zuq?iv2Ne$baAw@~$^;U9hTA~Zz%40hvVq`hql?Yk#dl*`>3no*ZIv11s&K}(Vgj^q38N$Cno&v7}pr@uaHFC*Ct?T#=n#2kYqntmejwK zb}pRjn7;4I45JC4a%i2f$?x3$-~PJ>E7m9j;;M!pd4S7l2+AZRDaP=q`I0T4wRTnK zC_L?>;KT1t2%h9y0 zl%}ae%+8#@;K_n^tpDkBIc*7b=|<@fj{mf6W?fdRd<$x;3}B@L}~tj2L_wW zB4vx5gQzvCYAmGY0dB0BQ0rJcZLRP6$|!&sTD$TIf_~B#v$D&@=u@Etm%dx@Rm4Hz zRwPhVsG#LCo*6k0A}Gj`LD;1w*NJ$QF1uB`kRZ05gyOvdsGJ3p!1w>81wYw*DSNh( za1wzO|P@?_b}n#Wg_PJXK_=Hqj=T|sqGf;P8^+R4(G z5g@67K>d%tyWTBZhk>`vSMW65rw{F zwP>12z5=H;nokN(cgDu@3CC2n-+0yz5mKB2_2>*BuG&PF7EOE3ChsauFinofoyVlCm9_zvB;Sp=ccYJ~W+7{Q6;6|ne`57-oW3DX3y(pVzB|yH;T4MMt z(pH-9#_as%Us`W1&#HO2v3~X%(j(j^*-I;HxtD_2u>sZs4uZKNI#{c&zuf5AXdBst z@Jlv{iW#_B3X!1JO$)ZF(ZRB@XJxFm9UwjU4%;apy#=1zzE^hJ6_>AI$7uuK%Z-0u z%?7`hm@o}jZ$nv{rMykVClMtZj?zltZX@w~O1HCGmW7}616P5!Mx!-9P`ERcw%u8E 
z{CkNz%7%&_|9d`hGqclpQ%qz%-HXuaeJ^KQ)_D-%MifOvi{P|b1_Z;SaQ;fT(<%e{ zzlao;-!Y2ftn$CM_&rDF2i|T=70~CL;TCtcT$-0rT_+2O+-mP|WwvfKuZWA*%kRkG zQHzL?J!}0k&N>1#Tgegx4FLernwz#3V~3;nOTc>ch=K!NVWW&zni_6ln|C148bBWj z+g#?Aom{qBdf`#MoByo%Xftw58)*3+U%(iHo_O-{rKU{DzQJ+o)z5-+bFF zt%BH+ghDoLE8dAyuFUne6RvVv2`-?6mDq`ONmV~W;~&HtV zJ&(ko{)5CFBFyeZyg!lk!_ZiotaCP5wHa~g2+?JYJMWOM#5y$NU5NwWq_dSn;{a`E z$6ajp&>ZBG#89VgsQ^Vlfg85`gS(enL_rCz&VlNk?A8GT&q$nO-I%g@LPh={3GMsl zte+(#nVPQJnCJ#D_aPNuD-D5L@=KAO;(7w1u8<`BAdf7!qxHdTmP+U_b>kHCvh{;J zQKryp>t45AUd$~V^22NA2YDhXJSE<3`^z)~SX`el9RjqSjlagJobr(fOIiSMF1tKyH46>X=z>v zL$uhc_j8^N^fcseJKk~SQ}PUiHgNAd70k-(Y(Gl)c5$;op)~NL!cWL?Tg!aj27iQTDW3Qg(=CgNu;kFwt~`roCnD6DO8vP@x*N1;FdJ_4>RHX*zKyhBS?gI;t9@NcAEAR-v&Ar>mX+xBuC2X&pv63=xQEdDlB4GC9K40;1K>2m^U z#!9LBl#Tx+XJ>i_`xOKiIXnUjOk@M1es6d<#+3tVH-d3jXHa1fxGZ#Dd3({@VDF< zx5_)+UJ}afXcly4)@&!Av}1{5w(jV1aH{!)4Xg*cNX{bpiMZ#%_)c^z%+=Q8c%c;( zK*L#Dbic%m8(DYo4$tnv2(6aIyC^C+SJrb)J;LHX8(Vjjeb+nhxAEwMoY6!*DZimT z6H(;JSKVWi*_k`wuyZ|cdj+msiR0LBuIHNVnN`S#R_WBE8)LGvUKlXh!YMh@Vi^<4 z)N{6Ur%BOEjMs1t`;&17N_Wji%7D6sI|i1~br8?J7~0~$~S9$Nu7GLt!wX}gSbo@*{10z1** zd;h}hr#T^q31mT}lb`x3>^?XfN!bQ}>I=pzBp(^T0)K}phtP@3volEE40?NEX5cjfp1g_Q=z_7ePpm^SnquevfV3jHt7H_Sz zf+HbO%YW*7U~12tZRC8#oi@(QxZ7>U+|LmX!?p^P>8g1eUSd1niOsuAw>oEc3Md|s zOPOkkarCoTCDOm*CaYWBO&j=`W5?Wlb5<64gFo{{PpiS9tiX}X=@C?VINyo=!~%hs zftPILXNfjkd1|E&mR&5nNP~?=tAKW$r?D)dx;i109&h0loBvnkK0H*F*P38ET41_=-X&M zVHOB-jQu>PiXE)@Y2)Q9J_Hx6{`1@a`+ry0wA!(Ws4BX~jY|`j1 zM77Hm2+9~3+N93UeP7JVxwM`nHUZuA@3w)WSHwi#`|;x0S*4oBN{{7g&RY-bteBbBzD3TzViK&7uh zNbMK7Vh5+?FKq+ANZgoFLaxDIBo(1EIi-*a)`ouJ81?2SZ1@*GG>e!tWo!6c1I1+Y zz1n&5udMNloM6~Kg%Y?6XwTxnU7+=gL=fzWoFjug{35B1Yz`NHk<{k!!3Zs}RN$G~ zjXUvjfd_Ff?tCtQQWQ>9;^|Ey2s*js*zm(oTYb&Ma-LP3}r6Z|mZM_Ud zGSS_(ku4P;T+b9;!6MhfV2-w8O&)%`ZRc0_zXwu@zjjI&gr9ue?3YQv>qr@X>8tI8 znQZgFEW5~sQu5pyio%U4BJB_VG6{v-amCRy@=HgCfB6fmWmDKKp!yh%dPI%Go%ENv zJg9s*TE|QOQ%$R9r7pKq+zX?zSHH$4vn$OVwvKLq(kS&Fc#AcEnPhebS8z`=()wjN zJFvQKXS15N`3`d0ewjNI 
zt-fgD_AmVifk(HUs%xFk;+CH?`;|`w?Xr9WzseQ$Q1uE1V}rjcmX8E}2O5zcr(jO? zkU9R!nw3)U(IOE$!LEr?UZxz_EN1zjClUCkQFWlPHGh?O3_sV3%5m9ZYnOtDr^vaw7*CA# z3e4?NHq7yf&vwL`TP{OWP?=5^a60o!B);hu=-qDm6b;?7-mj8m99%fyfk*mT2rdWV z$Z;gN#I^lZB5Jeo1lM}FuC?e3jAHYz#~5Z&+S^7lu3BnDb(WKID`rM=w# zRpLYh9JifnP#_nGAkN!<9f&ZykIUo0uYIzhy9aFW*Rgh51S!)!z3i`ZsbSI|wc&W8 zp$p>ps`4^@ii>A08doBnIU6kxnE%>Xmd@<}0Vj<4uY<*#vih&f72R*(Ec>s0B$Wtf zvJ{Ab(R-{>UcfDS-pyw8#+0!?TTva$W101`^1n`mN0xxSl#KBgb1-Z?Ou&|YT^1zl zD{(gbwIlqaTKY1OS&uOmk;;vvWfV>=?wju+$e_(EFxlHCb5z5qv_?! z(A>xsnwQu^viVj(#2qs7s)`D>OQ}8BXzW&Wi^UCJM{(Ip8+j#nk)#wDu5JaFmje_s z%92vFv*BpjB=%iaV1|54oQG$MD^RbDKAk67QaGmqGIzSE7Q#97~oPTz({gZ(B3Nh5sb z0#@mK2YwT)E`FV$2dhU};lbbd?7&sq&~I|DA0SMh4gV&|=!5to#upIf7I<3wO(Lfv zvg2SH1#v=sqmBJ02TY_1L>D$*iXNVpV?#Xw@NV!)M2Lc!!z>WAhwHI|j z_O4T#9HFHPutwVPb`(U{W{c(1JVb(RyHt9IgW_OV{!P*tj7YfWvd#&PB+znW5XN@{ zoPYiR{9`XW?t(5Ct$!ErAr(4pE5AwZ;vd;mYgs@Cgy{SYZWilhPWKR4-ZlyZaB(h6 z+blyv_j<{;N`G93pSSIRWH(nH+bPYw88+1Hw>itAb@;dq{MMm>OF%$5_}f@y{1sg6 zgvFo@{Wb`Ju4XUU@Ne^?2sT}YmdpG~fj6M&QP+*k6(v?KF16Ag+?}jXufcMUmUmpd z6q@`R$KF^~cM5a7Y4{8d_uo2l5$xCMzx6%!DCvlPo9M$5Ad8(WZPC7BjVsejqZAR> zjho?b9TlhCB>lHZ1)R>yP_td$Oow$2OnVGhSvJJjMbH2IFSeZR>?K!xC(20khLujY zbO`YUr+QK4Z8)682;_CSl7Pk~G1%O-d?&Dp_R5kItGcwjA7@9~$f;*jL&G-9x3`Fi z+zNOailDz;M!~r7w$z&cHVNXRWYLV^ME1MHV6?+hPujrmVzJ$VR|O1_hUScI{}%CxMGmQX3$*GoGD@|{AMQ-%WXL$JqK4)jyD6^(I@ z>0Bc&6U1u!uEx-c;+a%l;3WuJbGrY&kn3skMh*PlSN+d1D~`N{92(#*AkPnY_vxqtC3= ze_!xK?v-45{0<@BW+M8TC5Y@~Ksq;Vjcfweku%EKV{86CSJ)**xExi0+(5@Hqai|s zWH+%OfsUc1Tg=k3PSTZ%6y3J;``~+!7u$05{qA>JCrid%+~Z$Zx9Urlqk`%w>y>wY zA&RID{py{pPJb=MY71LbfY{zIbHt~t1-k$8g6D_>=;l=qvlOhK?FDeoAi)Nxmkl#czur=6Sr z1sg9Ah^OHUk+X9B54q69WKeE{i7cZG*eATq3pSZ;Jc|QqBcTxdI3)iegUR|m)2(7y zZq7HSPS0EG4~h2g;)rSgAvq#1>{1%$N+|^wv(vbg1nFC=B{3-G;z ze+)!%A5!1G(2sbhsBE~zMQE?#KgPL&Oc+`|^2Z$8jLvtIgzR9sIBje=8pHS zcg7ENXZ+xYW5;r$Bub(rO5!9MqImSc5A?tf^uV(SFd)Ey00ROH+}Hba&iUHe&fM+X zw0QLUd_RBA=im99|3~-#Y3ub)QjBK<)m%LG=@jT`xPp5M(t0Fo$ZU)AAo=#_|0stR 
z-|Jm=G-^M=)AoXmRb!HmqO1)eETV?g7t=PL_0g;#{{J7<%S31gbXvS4-Z=rDcH2y> zIu~&6f0V0;^lCoFa0;Vne{8`UZR;i9^rHX0M>$cKd_nMbhjL#|u#uOnL??u*V)fi! z%)SvDTLMdhk>Dcgz7YtEc)(6LexvMTVUIa*4c~|xw3+9i@f*J5XJ!=uw<&%pnhD-( z*^x{wbW~fuQ5Nxla8GND>~6PyG1`2f*&1Oc5QDX6`;sS-b?Wf%{ku^|_a3y)1Y`%_ zdcAK>#hW&eA3lv}WY|Ao8HS;Gz=x|1G{fjO%KEmGoH7R<96E@sYAmbGDuH$_!6-53 z8EQO04@na|0KNs@7dqwXYDvQm3mcr7Z^Y4uqs6k1ZJw)YdHzHNs+sp;AxREkEwJzn z$4!BOI7wpt^q4N@^tPfDTB?ANB2|>y`KFa$79e)-&zC8!^oWUK7iHAKM$E(DY^4GI78Y`@aEUF|c?e(GFIx0t#_^!Jy zn*?bQh8N?25>?(t7Gvqj1Wts%-Mu!t80WidhrIRiFG$co{z)72CE7E%(`r5ooK_@K z7>|KYqoFwsYLS=AWR#V+4&daWJRNhFRuuhi{L-vX+FblnT#ILH-haVU$w_M=S7fk9 ziJ2DjC)qO4Z7w;8F3wGGiKo1heeA*y)9jmm89`XQqjlfR*S#bPDYi=W-;5pGiW72t zFIk)daW#H3F!HA%R0%-Y^vzhxt)c_Hv0^e=i+_Lxm0~7a6(JX@nQxYJwjImt_6i7@ z82O%deABn&zowCqbvoc+SJ~P3MjK%f>;<&*Ht@}SONuxCaKa^-)yOx)YpnzXC@B;r z-O1@e8It{qUN9<;GENIZ6DMBBd{+A6z5`&wf+GGm?yaAd<755U4?$>VxfZPH3B60p<0YhJcUi%U7OiHP3R;o_U!q;o>aV`5+;`MM6rmXH;v4r?m;<8`=t#TAlT5##V6<0#e{3RXb zw_@Knk?^O-u5XMU2Du_0=RJA2+5za6J#+x`R4kWJq%SGpHA zyxHp^O2bIVTB@ENhaW83A#441kT_V1fGlLJ-j>~w>Ks(k{_S%3_B={@rw;EAfy;g{ zT4(kLLRVTV*?;tYJC8_}3@lK|K-QM8E%!lgmBZhT!z^xF8tRc8;4{K5qq!<_qBt6J zK7&Q&OE#7*(PowuzqM?+zX#i+@oxumRCFZrOigA_P22z0rUMdTQwjf{gL0UOGQlP2 zh^}RG-pKFZe6C6tP8GNj`XVF0v_)San@dT`)bp6tG|?64i@Iz@m5+xRrUf7UUP%7nE=ncipog%BnZk0Q$Ca3PZAO)QN%9BOohpyAG_AWIB4 zb09SIB|pDHi)U;gex1g)iye0ul&^Lq`<785#NczR(x_Kf(hvxyqgCg!&UTxIvFJ`J zzGtkKLjlY%$^7ssK?-{;LGGW#4nz2^S^$aph8wH9WbH)-0NRCut}tSPOT`j!NHL z_npAi(9O7;e-qd?)VJfkB+<^#)|W z-O@U@cvYnRpS0HRlsRqY-g})c7rM3kFIEmsgLQl-@YClZcsjlLY>mL)z2EVp>X9E@ zSTj&nhFBW@PRu^uMbvotS-H4JqrJ#w5fINog&zHm6AACIYqqgm4A`hJYpZ2Xdoec{ zuX<91h2kD!lhq{N3Go3W2w@gE44IimL6yyj9}xZt8L$^_3V9CesB+SJj&V_fQ(G&PZzJ}k8E1^Uz&_lf0W6?;AsovAG z$nR_4jVE!(e&u)__wlxFw#o07TQXbwWV22C>J1>hok=j-L^01*V7MKoZ0+U)4A?fP z(*G9=NYu=N!MBbHZ{G0ibvGB{Bxt_X2Kwub5ih_ws}g&;c}E zoXWGxPK8{xhVS|FJV>mg#_#z-;S3@+D9S7LMxxuM=*SHt_?IWIIa(r}vX=NRPwh9L z4NtJtt=}t~ggbEC_sY)p7tf6L@0Fe9<&)D$hj;SU|BXvk=l9|W?kkxgaU4tVAq^nK 
z%gPNzSq8;cq_g2DJ5wGJPZ2xvy>hhmZjJh%A0r0dQ6CN~*IpY7_(6_V%aOP??ooBZ z#=n=+o1#UQRAVyNj?l*J^K`V5_*=>lkybM~34*dfTOb4W$%{H7E|E`Oe%xU8g;;No z!cb67toNeV!w#OT;I@==Ml7mzI1PQc?AY!3_m+a>f}T{~68YZvt~W0E@sZFM%Mry=u%@MQUZB7GtU12Rs#xB3 z$~NB;Wx7qW&N>C+Y>n}-li(F?iyEh97233z@mu>+taY(%ESROE0upJkb>Kx$kbMH9Uc}c?K@G>einEK46Z0C0+C`D6^+bX->cin#E7P{4KFitK{eNj= z`S-{F+-ghZOn-i}jeF@^teH(#qfZxq5S_{C1dtg)*v&*0m#|AO`-08+4jmwJ`Mm!* zTl){TP%YMT5&(HOT{|P_Ktf)kyn^!4Z7k>LusWwf`H#Z{t>khcXT+hI)!|9&k5G&Gb!}kLf6MoPd^M?X&HGMxeN^bT=Rb|cJ_tS)3KLlFK z_sjVh)cJ0Wu0l34uoZZ(d9|f*idlyrp3w28oYOvTnr|82i5O@Pk$hsHEC{ z(Z+KWmPzhPxF6eOlu5dvm>tsCPhTq&oi0N%o2f=6XN+dVI?a7Q4)7Oy32bTeRrk^* zx*}`g``7C?JO?e;)AgChGZHNn7xqaTtw6w9w)FCp9gQ-HH|4|f z2KaC+d+y%{PVceW4$eVg=Ov&6K9Tc3_^lDiV3pc>-M;$*4A zJeOTw%MF1SX;j1mpcnmb!eYh3l(&I7onl6rH#s%7pB66XZDD7-i+~a5}`ZH2ts~ zGLdjtSTt8XDOO4$W3A>m#7N16D;xF%cSO0Xi#VTu-s znM(ecvl^0FZZMF=(j!-CKrLe=bD65<$yv*@!SCxI<_IR_Qqa`}fr;tv$} zL}s^=(9)gI%o`j@yBw$hiwUK=6PByHoIuKSO}_HVut$IxtG^ukM^LCiy@n0ZTw;uL z25ZbNwS3Hu$Zpnjx$MPS4Ee z)CVA?lU~51Q9~MiPZmCDV^Jm{k&Gq?m20)jai9pe3xPJClP%oGWHpRTR@||0I?6b+ zMZ_b^t(nW$zZc^6cyyJ9E-v_74x)fzJ8Zt{LO%HT!*kykVtHf`Hg1cT%T>Yhv(J{Y z@eb*=Y}r3`i_2JvjTb`1wzWhkEO43q$d5_6c|aj2mbxf&6}%ozED@@D#Y`UC0eI7@ zY`RD)WVYP+qXJMd2y6OL94c5eo_R3GkDLxUg`fy0gX$Nj(^|8#5~j)0pe=gWErJ1s zz$H7WH^Kc7`G9r!Sdi*H*7>7++C<4s!uSq7=j&F8+HvD%LUH4Kv4V>|{W-3D^t$((}1 zP1tlamQ9Eb4w+JIChAKnIBs(lKv=EEl;(dFXR&`5Op7fze4G1iv4X|PIlWCw*&P=I zQZylh4YrkBbMj4Ng@>`ykIQZT*q=39-H&~*PQ$>SvictfHj$74v~^P3|Km7&Ten%` zkINE$VqOhlSh1;4*&v0M(tk^ zo{LHA@Zok56#vJ8l+h?kF4kN1N-!8s%~LGsKuksP)K1y3xBbdN8_BkVT_ptfXtdSS zr+AyvDjxlDEcI@zCa_HUacnGcRIK)6KXccK_hN&_y&BG_w3&?I&+vwmzAQV$K8zDN z6Jsi@k4*nx5H;ZtZfmpo3ZiK#?0^drB3tONgw$s-R~M}~nhHFTmK*}f`+IHKxApT7 zKP%B%k_O>$*#+58a&tWahJxzU{Uq;G6;++_pMxi=|4E?vUBWC4(H1gV>y1w(a>}f+ zLzF;M_FjkV$Hit=-IZPlr@KsdtsloCtSs*RTxwP zA%$(~$WcFYfO&TQByf*N7NL{gYDlst#5N85q@3WM{h~Lsfs9#oVn?z8%%_yu#PzY! 
zgzRD5J~32xkaz5`F^5rp(TFOdrCS&)iyDIKtkA|aoAhxxumr=4${yQTblEF5<-c-dKNA9{d7OE2Zp3@BvjvX8z~O6rwr zl1~;gjX>%~6xACMOKidIWI48at8!)5FUPigrkMCPEXS!qaUj-e0_);NZ~CbRttq}1 zZh;*tVf3TSZS6#8A&l@LXnz9a*t(puzlwKlTaF8kh18QG7=4Jp`J8nmbRTd2*e9)X zxttP&K2Y<$*$AX2vx|Wkg+MH5(}tJ*YV4+%#zpvL;a{LFSs<)q(?d=*; zzy+)OX{M2CvN_dyQ>q{HgE0JHNZ^xhcz7QISm;wpsH}3n)9g z>7m7YAht;J$3AYYIi3=FxJgo5bRgc{T_)bJcJDxzu-4&!&({9kzq8JtmeW{iqZ8Ck z7`+bsG%i1^9m(`CPyK0J9BCo%zsX zf`kKye&~afwv-cLQ8(CfRRdq&&_S;JwA|;u+aXyF|qR3yNhTNZ8eilo<7qhhkdbC@81nj3t zit@8UXdKv&%RvlAbY)U1qN{GK2ES!}NYXM!Kb@^^PXN{1e+Mb9`iVH-5=_{|^xYw7 zIx5S-8%a_Q$J$AYEBn~&0p+0V-!_Q=$#?&oENd%Ak9{^!1d(ocvteHNOw;pe`PMIzGZ?PP6Y zO;K4UQ^j|e_o(^jac+h%LbMkDh2~4y%Ub;x*Znj|tSws0vU{A9C3EU_9}34eP*X?M zJt<_%Q8{6qKabt$-%(5idSeecTpurb5(c9v0)PT)if3$>&;h8Md6=$h*N-et!L)OTVBX z{dkU?1=Kp0*MHTj30CG|Cl){%B4>0i~ecuat8P zC83bRwdhp#*U!A=nNr8SMtP$C%qDY5ctj4cyU=#i(S-t6ylOMmucr~Q%|(Y=dbt@X z4flN1kluX47NSPM?m`YM$YR#uX(2A9|KhywgmqkwZ^}G#(pKV^4*prQU-)EXm`5^$ zUnISIV1F^ss{chCYmW{r!-8MrlMNfB=NZh@_>0($2MOWS6cwZl(EF{By3H{PssZ3{ zi5e10N&ur+maS1k!3s`V+b`nCJc_LZ!N9!#1G^MFs3ZGA;_<9?#xGt%&QWi|kLNlT ze7F<-qMXbR6M=pB7v+e58N-5V3ZxR#3N!Q3U*y%emNLwR+DCs82Ypk!jpan~V6<8c zEen`eY&=26{uBf_>8-lRy>8P{Q4H@HoAF<`=3>ihbKVRMZ3%ERpA+C_7X{UVk0^sf zE_wpa*2!u64N&k>lquxG^DO2`NqwsYR?I@t^QUaZSK_N*U1#>o*e>K@R`*Lk0Fa*a zR{zUD@@V>$pa*rbHT*IVYcF=1oPfq?AxRuk-T0*^k&X6Wdxmh|NB$qZ+$h_Z;kmK0io2>vVd>;RXJaMa1^}r&A$o+1HD{AcC`3s z$hkps#kKw_P9~Xotu1;J3M36*2`k7xU==g+&Z>#hBFUx~|I%N@ z@!fim#7MXtI>0F7$WiRw5xc=geif%{M-S${HtKNkJbsPP*GKas0XMsL+SsoWSs)r? 
zYw@UOx3C?vS^@>-qSRd6N*j+d7jF2ioy>^}S4{sZt^|Ib`-|3lCcD8pUk)g5a<%4i zZX93*?!h(4=5zJ-AK+zOsA_nY`Ztj^Eavw7#b4S|1&9(LmAve>>^ z{d@mT%phB$H;z89V5|QsPv|GCEl1sYz}o!_T3$)&I#$YoAb5s#dZi*R>W#s)+KTKC z6>S46<#6@mB7mSK`yV6~ppC3tzoCLn8>(QnqZL%oLuVh2dSw%JDL}){3yiJ2-Hf;+ z&EIGLjj=VYRso9klfFv!(`w9VsLjc0-8_=I?3|~4);oKNK zdk>aG$O}0T#=>&#mRsIp)*)P$7ISGO4kGdn52m{81Icb~#g`BD9casbo!D|pQ4)CS zl{!yx=<0r5u=!WItp3;KctFD9^k@ygE(hnW!(iV={{rzVTCVBW@#aK?J58`T%0%MG ziBNV(El~#dECvL}T9RC385rqt$|M48%QBDEE1MQ;|8*dit=zYc1ne^hc+NTl7)eO* zJd9ro_`#9+bs~j5vI!slbvbOGBZ1tA|M^+GxkqC>w}AKTvPYw=LQcGtV>#=$N5I*& zSV0#)7OEDEDdop_w3VRw&EnCAhnWxeAP@GmPsRP+XM9zO+r{gV^Zq*VbUB=2OXg#g z>&#KW?-H*rR8Z?!$(;E$Tl{sL8O05OI9d92l7svEcj-Z2&XpVJl{HdK2&pqe*>Cb; z$6#c)A|KZMrffNZ{4hoOjpGXG_+PVz_$6|Mq!+)5Vad@T8B)`4;yGed{T`mM=HJ8y zV$EPJzlm+xRLqlFqc_p8=!bn&UjA-tuUd6MQ&b?FWtBS<1S0D_X}vxqXZ1h{uOso_ zKu!pj&aM83y$#ywBHHy|a&5!1XEb4Y5MT5zJDMNAD07stoFP&-vDLM#Tm+Oh?jXa2 zkPb260u^8nGo9VE6FSIdVqpqB#A#Q)lDViaq5m11|4lieAO43wvW1*`Ka!Hg><2y& zMR5KqIVP5)SIATDvWQfegamNco?+I}ep_}A5x4we>ikzxaU2l5 zSzC;wv84l*47S~?bERH~%jx)SoXrvqM5_Ew?|{H=h!c9>0hFw*6v$FS-htmZyv%0Ik`65(G z`asyT`m1?~q2C&=mX-V@`4}3bZdwW|)nHSW6*f|9hy*Fi!Z4zQ8jUiUMPo;TaOyQ{ zz3LoAyS2qv$FefFLKb{Ar`tp57#Paz0m2HGd5{hn(XtNCQpJBQ>J&K2%{yxYQ6~CS z$qXS}Y}jYmrKIXM5*4t!=llz~!+2Z`JSYasj^;4Cx(Yoy=3j}5N<>hrMO`tNq`#`b zRH}MsyeBKLw7u>`Je^<_V>}r-owAuI6M-)$2hqiIzW#soM}K7V2}XAh0Vg0N6G-Mn z4ASgSLF+h`MIC%KE+}kE36y`(mam3E6bO&psBUw;a`k$`s@O4xv6}rZkUVcTYID(P zR`1D3e7KTrZ?Eii6+za?2X~ zYu56+xHpKB2e4p|j}kh*W^K`2#-cFNhdUfo@@Wa9QRX(amqYI)P6j_B# z3da&EF@J$;Yrpe_76n;i+H&c9tHymL59}bH((lT>`0N&&PACZ3K!B56m)%8UK9~JN zXr)eTJ|?DI&m*=FHFO;^p}=AVsmE0n`LUE8_w1AJ!LrY`hgTjQa@1F7+ouY8B*EtI z{ZL3?pb!^t09@Vg%X)89kd^x1#|FwRbkG|76H)}E64A6gJu;wxS!w!xnQu2a4Sruv z*1#YM^nUM00YLJa{+3#bVOdhoP zfLSJ&kdD81`yHaK%&6}kVx6;|Bx@ahtEME8MTyeTS>$uVQKtB+3v0(BH8_+ z93PGxWW^ui_$Zz@n#5=+(V46~PC%cu{vlSB=x{u)*5-BmPAYu7&cq*LkCCbODd=BE z0?G>_shN&`XO2}O8(8ljd=J6B5a$xEpDt&9!Ul3gQnWuJ3gZvu#6b)`-fttmAXGhz 
z2w*hE5iOzg3v%Z=n#~co6MVwPa$F7sba$;9SJK#`VNJY{6$cSS;SL>ryNn-215%@D z?^}A+;vF^BzCN!**FjHjpAzbeSXdM_h33{i4-yC*<%)GLN| zES8Ad4R-EkMnZMQ+*+#6u~K4&udy8)?c6FVJfiQdz7is6xp159+-+qxJ4HGc%c3?r zCrgd5{-tg4W^CK5)*js!?OTu*+j1y&9!Fl5j68A`d_`9bwfB;3&oU@jA=Q@&;L#ai z=+vCFZlA$y?K5~+_eOOw%UVoqTCFe2*qzFXY9PDCbxZFa@)sMjbMG$mDSM;q6T6^0 z;sJs(M!_LJA$UU-B7G!!lg*Swb-eI)I2Pc)BODox?;f&1aJ*_qe5pSCCwA1%$?N7n zLj)YlCG8UNRD4>-@^}`zA(#$39=#RA)d=%$qA zT-avw{9sUi#^$0%;pWOS`-+{QWvgzu9*2mAe#PpdOp-u3zD&ZntiGY#J%n&4 ztp0{_O3-52n&_BeM6tT=wb0THUg;|w-nH2uBVr&~X!ph}bA2iDlNW5=4F$V#KAwk$ zXu6?X;H^7teZt5h6EYnk-wW1!LqUxFJ6-j-;fA;{>`meRTC$Do0_jn%MjLM^2!?<| zY))%b5bp7?e~;Vdtbn@O+MBd>;hI5_e5dK>wvH$<-*^sZU zLS`vb%J2%FSZ1YjzBDLa?0kIo=r(RGHeZ!Zi0R|TJZ~3bV4dY>Y$10DHzd~UVtf=K z_RqE$&Gqy=&xle*U-C(OzKGP9e0YLY+U2M#uOPkRQxFWxH^g%)+eUv^u6T1qiCtvn z$-(#X>~!1J{E6I>(_uF{O7S$6&aSPxF|OVolu~s!#y!VK<4Ie6V~{xe=!^(%(Ha?2 zA&)UPwlz1#?Zz90aF(!0H&*X`@RYXpMn_Qp9c0y*T|#^!;9i$M_lb(KrWhWUm{)y$ zj0ds=T?!k#WX%afp@aM}+HhlBWyL*Zo)FqT;%sgEM%(Cr!vPW=-uqWvGkD?63A?Mt zu>^_X6}0<#+fohxTnUFlWE^YH;R`7$74}oM?Z$$0KZcaBBR=y4f)2@?j0|9VHi15n zZd@iaoemQ$NS}2l^bslz@+1cqwr1Y)WbE@6dv_{${Ld+L^`;;}xDc##VRctWSuu|0 zJQEJW>Z>x@2)I+knwt`p2v0QlFN}+oXJPG4i6jo}m*q2ux-qLmji0r3Hx-nzzn|x) zDLU7t%Rv?%&eq=)Xh)8FbYRUl6)dLs75i;Nv?x(OIQB0&#$~Y?gM(-I(=;3+@2lQ9Tf^a&YVv}i}c0{onaf^u)eAWIado^jPl^7a}PKT z@{R~4|C-XUAz$Bqa=O~E|4oYf9d@V!Oh`hS^=E9P8cC)x{+=Jsb->~p!NzF9t`y=& z0!~GOMB?n9`U~lI%!fiYLUfGyP|_G5ujWS7d8_4KeSYUqzw{d?bGThdt?^>Z;lM*S zaZ^FR3QIHT<$Lz)ZJ&z%i)HNeO^#*?YGFNbae+-9ytDMc0PWIogsIx|8f*$7xF&AUArmS!gb3lD>vI&D1afhkSNUE#W22`{);LnC zZitTER0$_o5Pr+5Kx&e4ACUFSMxQA3tF>M?Q9Mf#$h~QstMSZ1+g%zqrnd}G=Nk~g(Cu7s4w~_V+W|)b+ipkG-u834x+OG*e zFGM;QeTBnw4gpz%cXJ5^F%@O5rqg+O)~2&wkxM(CJDboB3?7u*n=e3f^T%zrsx2{; zz&dSnKFjTYZ0EBbFDXL$d0&|jK3wo#_QRwvTsxg5&fK6~tojviTlghg^nRO~>{8wZ zoRv|7*-|wRf^z`HNrhf#UxhUT<*aal`^94=IT~S%E1$``sRGnOH4O;5Y~HxxBeeAQ){vTLQP%_P*20n zL0ib~fW1VnX=|fQ`|AnuUbV)X;~W9Bt@9teCiLHwwGlYE3hIol_b~@BinZpO109t; z*pR3e-a}r=9s<&3}wZzljoc)wM3T^&HxgTxI%>~0L 
zlU&vztm1ClR?YN{B3hKQa!0~m4qKr-ZTroEFndW}MGS$eiqg^PuuodIcMMoBK>_wA zsPgIJJ(Ehauc`stWCJ$`hF0=m&dbov1>ZL}f5L{nu@nvE=t*>YC?UWkz3UMh@zvh6 zOK9_ObSyC_d_i{H)lDduAv;n5`$kb4Dm}^3SYwx2OQ0h2dx;N^MPKr{!QR!^kenrt z*zxG62-7(J$`)A5llTykofK;HWEKqW<%R>>c#ba7W8ohrY$72n@kk*KqK*_t$cTA+ z9=EAHTk>R@&dH{2yRMN{+xyOgUTb7>j>T#icu zABF{$_q#_{Kv%q9Oi*m4T8)BZ%Nu;vt3Ij-RQ9fNmfOEztKQ`|1uWr&CU23O+tgKG zJQ3WrkbkS+m3RA9PJ{OHWvhQzk^@-!?YA}W3iQ%5jFCNPx*^)YxS(|}7QeRkUFGg8 z+j);QzN_FLshMnDRCax^AF3w*{;;eq*T2h|4*`9~nsclo>*Lkl5T8#Wil4HU=!{U) z3L1eP|7F`4bMq8Rg$L(cy#~?K8g1n_^@eS(08!{{=zNqRZB>8LAA!qw8@I$vWN>Tk z?+WbM-4hW|x4kRw>0n>cCUwN{q4s^sF8Ap-RR;JC^%}6JGIa9IM8D8hDTAXC%rc zWPdFSU|V-S!dW_*BS2NWs7I8$KAx3|mK%Hi3GY-0u>TR8^hM~}*Kbq#VJo@7Y&zS? z?0-LFr}ASj@c|D(epo3HV$bs7iEf|@{E=Py5oFRuc)bE zH=eio95mF8cA=_N49#T-YztY2^hS?ALB;K&uOsgVUTBNiivsdV)^y2d7O{$!szWB7 zqT}cC9@^zRFL-SC@boXoNCNmoVC5^(x?Da@)pqNv~CFtZwpe$kSjHly0!7@rg;`DJVMUT15c_?T_>zu`lWHs+*v_k*Xmwy>3T*b?|sVw3w?#K;C0fSK;?W_A4pkn1APC*lWlA zdm&}^#jv&PA2eOO7eG8GvkZZ<++AK|^5eOJo{D(TCStu#q1v3b$y_}!*ON9C9Vj=j z9Xf!+ zx~-}!r|NIEY_U53bEv1sR@Vi&zw;pGSJ`44F2txb^C!t*$*b0oRd;h8*Vg&vmqYCo zlYJIezOgRQ49^~z#@0n0y^vt(f@z!T3WAVw%huNg8rs#vEpGN-)EhyPx}oYH_gy9= zDP&8XlSXGy(7eL?{CXAOa$}C%KTs|~YqW+#E2x@(ZFBT5k6c`k6E;X*`3Op$v865$ zVOL+cEw|UjeIM-YC!;mIMGmBpZ*o$~Ze$@MIt}t*d#;n52Rf^HD6}Y6oe<#N30)z* z3M#kWTY6V>LZqZRS&`%O||u zXWMNuml1Ir0j{U&;<`&>=4$8ZfWu>f!x%A?n6%mKSWH8uN8%;)@A)kK7Y%vFiLeMbyXidD1TB zl*Pi@QZCTvcuNaHxE!FCyTSs?PSlrvIeNR!6~(Uj(tkAmvlZ_>TsE)1<5)ZqnT8Zz z)!yy*z9?_?j<8^>-tE-?IOgteSlzqJ4KJ$f)$v6JmAIM5uP30aU$8arF8HXc2hrub zU2i7Piw3T&^)>!xZ8YSXvdWF`4m9&S!t$Vkl1yDBaWDWxlREYPccL#F&EckEz zyUXF~>au3NVsvY zi;2{Jn}|h#S%A}=j9z3`IptqwYel;_9b<|VRG-L{?X1_v^>KsE#CMOMtoI*w#J|Jb zqrQxBh?0hC?0nSL6)jRX;fg%dI|+slEpZ_sV?@G#M2nQhNRaQAiOnlqXT4YLMyIvrR>v}mrzOjk zSD;oKqD+RxQt|QF+FEb274o6+*1!UitrU62$5#Z^ zfZH6uWGsdqX7pJ^^`K#bOfXgXFSZ0Q{>90}jo$X7AN^DQ-&$`C+{2Z!&9|1r?I{(8 zu(@ihCdkyVzI+<-;ufFG4tTEiTMG^)v%+85Ht!a1Ea93C?{@Nl@*8pGlyzOxm*^Vob}xr?_+TgeRL0wj{`C1j0*cINdx>440&(FJ|al) 
zu+K<=%^gb|$^z6KPH0q1iHWHlj?r~Fq%D`?VbmwwHHZ>3Ps0~_Lcn*_KYkY1|6>7? zTNTT5<7;|~d5y-RS7Is`Qw!Nc9ly2Uc}!7izC8VMbv$`%&_bTf8a4{i!gx-qmSQNNkOL5tEe-D%tQA2Z@yA*cjim&rm zzw{5=ZNC*n`0Pp0MA>)ZH_D40XKv)S5i*%CD6cyi1@ZDi}C z%?`4p_@6t#rWtk{~MASM*GDF3z1Sw+UfT>JzA zxx?$WC1xTleZty(Ea_`(TXZFftL_V|pmjuT3`3arcJJ6JHe8kbBR}Qh*B#A+t}oc2 zH^ImL3Nb$1OkXvKEOCUsAq5gZjFDSx$U(y4WS0$Rd!976`#O~Kg?E?bsQB}d0P+d= zUsRaadTVa6(c4lE)kVOxZab1|gLH%Icr?4?VSJV4lX>8=JO$#H$9yk$_1p1;Wv}u9 zDzrhZnhP=(@E$@T3(dgoW%*$>XJ*xtLSJ)sawUtm@)ziR$F_&t8dfy_9PUDneLXyx*5%tC$KB zjmtUqTmQnZ+ zu3KlTR_8^hy%ffEtD{3+4onxM{93)b+`8G?-~YYUuZ}~r6`Nb^9M)v}a@4_04XYgi zpgR-?3QO{Ytz8{N2=bw4tTAf%Gf_A{TNh;+-cgK9)9Sd+vU};a^;sn%qCF!Q)zyAV zoXZ-Q`EKmYhSdc-NSzak=a!tsql325|JKE`RtIjjwxbUn;pWvrC_wgFTdoEBCX*C) zZj0AOkt;n{yDwxPai6#4!Vor(q;=Ne9f6?vu{}R-ao@vE|0o}%LFc+6C0_geT<%OKZj#p*R zU{NFI&{I~6zK8<|p?5NVNew?`<59n4Kb15Kl6oS_^myxe#|YOZqpUo_x)nU@Q#suS zF)EzS!ztq#iQdmTppU?r%=nNTkOAJi**t4F@FA9&%h5p9qNh>&+WD-4GLd^SA61^1 zB_@|J9FmU>?VnWovqa=c~MTbaM`M65~TMmxZ-5$>{Se-|euobJLmQ&-dH>}T=5<9Y5#1yeLwE`HpoJ}Eynh*J5tXH?( z9w<^M5fIwFJ4lQv+ve5w4&sdw?ZvIVVcWe@uY$L9`j^?-Tgu1?eH%sx_W zqhd_3+3d>UfLXtuw^5(Wwuio8N4zCyv4@Ow*`xMHqh$5o5I5E>9RG^4VL?|2jrri zu!bm;+(>Ace{Jm@1x?J>9+k?zF`Q-|1xZ|9BIk|byTT4!ql#v{E#6vwOeM_%iNme^j)JsUWZM=n65|Lus3R9MXjT?;do{61@$bBl z)7F_|?A_Dd4PDcn3#IfR*6Y963Ti6E+YtuJN1<<#PCqMB123Af3QYJLF67 zP)VOV;_KYrZig#yUM|^5tAUm4#;^iQEq$IP)s%pd@XI?T9 z9!r?^qLvKZXjbhd@FoB(l%(>j|g#ccX_*-X_{QN$o0d(mcNNhFKIUNUPy_Ptpi z(+N8tWg-Q!(T2{#ddg?V%$e$is)8JKU6#Ax`|#l}*+u{BZ?@QC)RUA=LU6Li*d;$; zU;2k8Te>42Z8=I6YuL-V85}ZJ`2dG(Ilz_4J+7V6ql85~fymuX*h-XXRZ8q{5Y*LN z2kBACrg>-e?8rT#+g9D_=z<3seX1kax;y>p7uAB})t!EgN_0)6aVOYEf5Ymdo<#O% zY)$-<LAD*qdv)uQu{yox?CjWbz49C|e z+{GRatk-;J!77g&v<(5DlLW<0pgfPST1z%k7Is;UZp;EqQn2R$ZDd>caoh>s9ucma zC4!1=_Swis1QBIht`hn+IeAW^#MqK$(rXqBNu1_8U034=#qh8+n9IJzZZAU<2fkjUNF|GNhmnAQ-3F8i=3N8%!f$W zkKg#61w*$GLun!h580i}Qzw0(HsvavinI9yP9!I7I*0A`a0zFtVV|aRZ0?|0XR-yS z`J~O>dG3xPlawq*FK8jz=5jSixpY1^4KJ3mY4iTE1Rc3hb@mFh*Me^g9WmvLcgEF6 
zZ0a^Divf*9`54lI&A=5Egfbw}Z9i>G-p%09L0*^3*+^n0DoEVAoEy}&L*YfQWEHO4 zf_w+@84l-z3185m#i^ce)#~>t9A8(w00;@IWRPN z%`f&IwDsPr%x%iLOB&nkttE&gK=y|0Yj+Qq5PHz}7lDl&Z5#i`9Vhm(f$N>6$B5*2WHjw>@DJck`KKUTDv@l%5 z0fOUA&#;FX7p{1Ra&Cp)b{o`@99Kq8gLc?Q!q}d~a9N!#eS*0tG2xLYi#&ChX+vQ6 zW=rWeWXG~XahWOXo{dGBo5UbMz)a(Klobz9)?QiUYI$n59@t?gV+CY{@Vbr1q{QaR za^(aXGO^DmZKA6894dmz?BY|L-KqS6GQ_46u%f2X8s)`J^<*qmY$Q8)I$69ZUz9mV0+FJA1?30YN zxf@<;!C}UGpf0MZsME!Z%rmCeV=)DA#llTSC?*Li1 zSZ}eI(z8)psyeZ-JmG;%XL1xG)ajhf-c_F9ehAY!|Fa8?>-jttytVUp74*^mpk2s? zD8A$D`$8UqK@1fyCL9oL3I|&9#+$t-5XM7F$G*ycL0Z3#P^a!a z1@mxR@&Af1BIw_=`u7Ab5hCfrBWlfi96Jb{X#gQmhByZFy$xAa8AG(xYv1EIEbWFw z)iy?%8v%H%#B~W-yHwjv?lY`WT_rxXKCFBlu@}8iF zK1(>2ja3628aX5iInnCfZC`Jj{jb3;YfGS!LZPL{pehh|>|Xcb9BvkT}5aC2W)#1pRD1}d;8F|rd6 z`GO7k=);&sLo=@ZeL&DsX@lCAq%Qod*e-`z}dy>oV`mz%L;JtATlwt!;f_a5W&i7M|JbhN5{Mr!&P6LZ z5M+;Xb4t^A)$4y@o7sEIMS_8`RqwrCaT~l+-FwTW?;<3%t$uGvEsC6|{=LDE6k-e; z5j1gY-dpbC=eAmd_uKU0kJ?)AVz$;oC|6BV&sw@qzj_R{biYy{tEh=$>=HSQ6`q_r~d$5NQ9N z9UR^^hpQju;g;wbS007FL%x8m6mfY7|D3%5>kGCm#&O16_wjY>@JVyZ5nlS==tIt0 zXS9R}g-Gs>5sK}C&s;Z(L)!b^K&iMyA>||Zly?GGPco8h1_iAJLpvE$9&+eE%(4&r zNVBz`ZdCh+{1-8bc0;0$1jNG0XLR9Unh)m+cQXdNJsOZIqR3g4z$1i$4{>6d5=(B*{F$l;j52z$|e>-NPKUr=5*XC5Oue zoryB-s`z)sB%BTKPjTkY*j&^IlMU|189SdVGk8$ZhUa58kMO2ms0IjVyY0e& z%9^U$7tf}5-!tq;6&Tf#mMI`;kstwqk2SGLl ztqv-IW-<2M?0{fV^@z3k-!OiV1u>tZ|7myF_u%A4O4w~{0-yB^V|xZskYm6Rc^cnm zZ2OvczK99q7M7iB61P=SQ7*bX|GKM{d6xIL*MT79<^k)Ao+HpS80w@AL|Hn}N`NpF zeG@;E)9$C&3C!UfHH~ki<|OE2a*-rIF>2N9*Rt72n?kR8f=X-&N8i1pC;xvA!5hgr%_lpfiN=TkJW5aE^o z=>SyDC$^GnT_!weS94c(U_E4a=T(0e<eH=p1x+p>DZ5t|qP=J%Y(LMyo=(fv%o~N^QNndbCL) zhRA+%RMLJaDbZ<+ecF5?+a#mgQVos7lkYMfY7am`rC6q1?*zH+y50yV%5!+yI_?f! 
zMq&%j{`Q!&KE7_9RadU529cGc^Q?7ySBS94cnSL5heEpd?l>#`#gA7n%0cVPh3Exs zF~C4VjA~uayJ)qcYN205Ejye`g(!I+sZ3C!`gUQ2!W|j$>F2r)rYlGc@0;287Ia=jlCgPt@k;;5Tbs?R!0rJ z9A5)pKyzEY*C5zsF9gk+sDaTOmsg?(w4m?vYwa0NJid#+Vr$5mDd=p-NsJUKWi?>MY z!2eEKUqUSdhcdfK8%Pl0TX5!jU%}hp0*ouF+EV@CAO!+Dlo0$4KK L#EN)cG!RH z0qqRhXf`e34tc2=rqC6yHQ6DOZCD!4dvf~Nt=B}YG ztND;J*FiRsQ^zP6G%%j$MqT;+VS#bIMnMK-B0y0t6AX#YCSK zIP07Ez*pDXOa-Bwn;g5&<^uKh57=C8>)?UiyFn~0)AfhTX-~Z1$DsON;SXg zp19gFkQw9&sJkaAeBeiLaZivU5?pXf_NYkZ16?z1o9`*N8YKV+qAf=&-qo_H ze$BR2Ww>9)@o3LQBs2mPPPc7~N>bgt$YDIgx_3mGD>D_)Y23CuG_$o$NHp&$2uV>t zyZujuRo0u!^i~aKr0<@frJx~)3hOsejpQ*W<*hrED=qYmR5QCMrn86c39Qd_CG&Zu zsK1m#)d#mo4d&NVe1;oIU698j6*kD+qluzH^j5t z!<*aO5L-Hk2OHwV4f%01k5Eg)xd;OLztFSU#vB+84Fk72Y#bX%Ck_Ug@@#XIIg#eC zMw^3$<>bd~i;s-QHCXKc7393R6H zSUb(jekfP|u`U~l26Dwf)f&Hq{BRhJv7}jgQvx-#&PSrmPlzlip+Ty8QpR!BsdwzU zGU*<5ZpW(mNy$)N`I)(IlqL?9%S7*}$k4QUJ+*WO( z8ea+PWu!|K?d(Bm9vHt^f9Abl!~fi-tJQcFt@zofuYD_0W3a%?wLZ%GhWglM^EN$n zU_Tb^P?mGPUk?u1c|Rc~&Yq|h->N>Wh%V&FkKin73)yeEWlj!XjM?cahC1R|_Gk?g zwwUK`7Xh^Q+a>Q+!Ge~eAB9%d8Y`dm<*IK*A(iRexGj4V+zsqXOj=$)gtv;y$g$u4 z7q;g~-ruX&bt;FmH<6jx`{VMQ(6#KaD{ROXp5}T?H zCeX$e>rz|){=jZwMP6x6toi+cY#x?R$S}T^JtpX0QC0AZCrnf|yF3roVsW_4Lsvgls;u>tr@xDkt3p~txS%~ql#kS|H ziieQzYiCx+Bybl-oiWLA4$4!Y6j$F!&(`bP!8LdTYN_x2f&66=FWY?^i1k;@MUWa3hGiVj1HJ`@Xf*+GD|{j4n~yl?*#n81zau0*fW5JQOM z%HR>x19rRWtsm<}nP~UAutNb7p0ZW<`mL4~FYafcr6;YfDwFwUN!nqnqs)c*#g@_P z?=6T01pRSab8lQvq#B>x<@ri2sz z@U*Ru8b~w>_InBfy4h<$)ev^jHbi~+S*Z3FhevXF^eFL3@7I&on$U?DAl`oVg0uq1 z(|a8u=@f{pyXMfnC7X50sL9%W@?Y+Hlw3dA1;jgfRufe0*9dZ>*TJ?&w}md?#+{G} z4ACymSm(V-R6Vw52t3yvtp)3ef-FE2KGwt1PW8oiH?83*fV>^Z@dwEFXG7Ve{Dfg9 zhOhO=(v|dqcF23|SG2Q{dkemwt^K1NzPBLjFZbK%I{=mmF$ySQ89Q=sU@1Jc_u&_^ z(~jO794z7*NJ&X+j(PtGQ1{wc!b30#h3Gnd@40%nu4{FF-KQ3jjNrGe({wzJV4Cm-@)#(~_g z5E!$$y90ZAq(I4L#39^k=W{lH@>e#WBWw*p*oEAX;>}2;7Ea$nuF(JsRLJ;?QOnV% zG}Q?G3ksHC`>+ENaT70~7C`>P5jgp?5ZXpSvrkTz4iUstL!@=s7tE;1`# zCi+4;_m%s5U{Jfa>b~GscFQTU?!H73vR0E){iLnF@A`MUyec4a 
zf^=DZ)Yc__jgu~A?;7uq8cNO3a9=rZ%4f1RD;FVtAsehQ2QM)e*WFi6eHHw9FT#eR6B_xHD7vex*@mD4MV-Qh{w zd|$zBc%Na3;cGKnlehVnm^n8Os!z92p_Jw4*?wO+dk+mOU4Z0r+wODQM%TjW-4}6t zEf-`*&U91XV83mzU__$LtRzI}jIu%+(pJ$gx??d3s|o4co2_7|MPlOcVeh_11&6=Y4^FMC6BarCKrz>H*^!gm$#)#Mq6q+j5{Ssg zV~VGVmq_^S_@%@?SYo`RmE_+NWQvo~cddja zLKq3%ChqQ>|L7kgg1Vj0vxoE!8hYMm%gepfF5DNG5_%#Q%KIiVzg~*%N_WF{$j!GD)#PxXM8^5M_lwq~17m=THs`N|v-%2yD1& z>yduNq8`e6CD;OY8ZOcMqTjO<=cM!DL5H01g`?{^oZ{+00AfxKXtq#t@`mXyx!DGbKfLd&d3J^(9 z#`0!GC*lM3l1sh%It^>2MVzz^*LC@#L?L1|qAU)2FpEN(!r$4v$YINd31DjZ@F3{|pB9Hy+HWcfAMtqVD z=M2#rVdCjvAp(2MMiMNHZ@~Jv=Md|rYWot*%V@TRk8HLhxgw%e_uJ7A6y!_PVLO)f z`nrh!GZy_ipH`OMj(cTz3aj~?NS=hT!O3b)(uw<1FM z>P=;Lo9J%3s-R<~YR}o(1Sh0~^75F;GRPEJ$0Pi)*$)H(AlGkU^lqEWq8&XXS1JiJ z&PPj6@GHDCVe`KC2sS*u(1iqP0OyW{s(}&|7Cok2^f|$+_S#~C5{cWT^}LkpNkC`( zJ)#D_g%GsPm0yms)1W9FLFEuxjx`?oie1T*R4fU_H>_lrAgCU@nx~ZpMWC;&;e-CHOPuJguF^H@ zHCy|^SZegJ!KyWSOT45ZAlBtjo6+*@+;2@WlyYC_AtFM&t^Z&!LW4xkux9@S3q1+t zy&*;}qcP^B#Jb2HPFagj2tw?nZHzXLBiDMuTB|XL=@xKq_Sy$zPu=#xa@@ONS+;yI zsFFdlLCD#nJ%I1-29#~lk^8{A%S?wiM&)DM<8#sV#W5~K_t z49q~7?EXQS>zTS)$;33&IxOEGIs(g`7Y7Qmit4RYl=bivO)G_|81=R~zkO&MhQy%CD`bZ`>B6VafLtWtXC? 
zFi4;{$<&u}H!)nZ%TXaEUh=VC&S?^}5MTAG3q|$xU0um8kg?d+sF2UA^bYo+KmqNL z!K*&xC_s}IKECD`Q1_u^O%&`;DB>AgU6r9M;p8h6Mg50ji7)^rl;E1Eq7G%}p^zyJ zA1cTTRC&PG=IC3x9<#;|1;*-Dcv(^eeW+aFKW+LGYx+=JVZ}C;cl-M2cpQL3AN{K4b0vYtOFYLB1_#h1yJyREI<9%5TXwwnt-yp?}#reKbxX->|swkFwu_ znd?nZ_V?_@KB+HSiupNZ1KAIR|5G-U4S6!K((*OF;J%Ctc=9WxPbC< zg1dvj$PqtaT3hm#&LMLgy3A-9+oc z5j~f9CuutELhiGozv5uA5DOuTb48?Z{idYm7rh-1xnBB3pN6apy6sYcBKf0YWIhQ+ zwUlKn)tgLBk=8Cdcn|%xEyvg54$BRDl{0iD%An6gjDP)QFz8B@N$B`$!8uRc)$AEs zM)4Gsp3)lQ0wYWACa+JU-+Rv(u6zHA)is7}V3t+1)s22@|1ZLT@qbp|=r|-vTq27D zf~dGPjb%S}lVUu2EmS|JPtiKoMmtx^IqmH@tg*2mj3NzRmo19`N(!CFt*J3k*kkBq z)_VoXXG;vAehyo6HY(x;XPABZ>yH&Md*_)u`Go6LKnQ(+T*JPyl>dH{OX2T){$+w$xqt$=-maQ zB8#83PG1C6+Y{F93;F3y*6To#DHD9zms{7>r#Qm{6^!V&*#ZTT8p@vGW^rdMy20TX zn@61se<-%0EIl>!YIz#;5r;qqMs~O{C_=<^K}C-?7Hlm8mLm?;!}z#ifEC?8F*^nO zdn^EOO5zL*&>J?EE89oV@#EPu#_!Ufa_egeAkofnBg-CyE}52Io$=hhFMtcW_M`im zh-qk2dcH+<`Qtm8tMr+Y)OP9}^DS1sh?X$EvT3jQ7U`wm{--88o8!W|$%E)kZo-Uj z#KW6x)+-Znw_w3JuME?qI03G=IPdF+s20_yH+*!DUC7DrCdukTjG_2(Qkh9Ua?zX3 z*8aTN7JVgVYhO^_f=mA86Jqw3qGd7q0Xnhpm$U0OOfioNEU@Z+#}h7zk#)~$tGhoAi83D^DB+n`-(Rlk zRt1%``e-9D+Vjj-#)xa8>{A2nATMdlX%uS$$E;f_&`*@G0ZKa4?o;D&!#4B&z&PBf60W=>p(3UL z*L%A|CDIvzOXvNLW_3tp2~lFL6>hCN#wj-*QcW`I-Y65Ma#iR{)>oC4pgz*3*g$S7 zR}INTjFGrN{xvRC9j@jAO~uwpk#Q(jjR4I%Y~=oe4)!3$KOB{%WYV4jtc~993LUXk zy2V^F3B!2wI6<(BM-mX?MoigJU)!D%L-SaUjpGP;dwCU!uNT#&2!6qiR|6I&wjzwG z-58Eh5C;c2C0K@!^-{aAZk}ADu6S@A~{Reg+&G5Y%?8gpesv1uQ=hGFi zCy?Nr^X(&;CM8$%*n@tG8duGNR*F+T$-viX7Fdv+9Iw6#TFr@#JGh@7RQR-mRW zUelQa__g+G!CoyQMsnxcwg-Zg*@M)t7=3rdXtJLmmsl()F~O`eXU7+pv*Siql}+*n zN&Gxzz1~9s54Uyi0U;-D>%LU~_>cc+L%F(;b3(K-RP=2a&Mpg=U&8v@p`0|nU`n|( z^8XU|{;*wMS-gtxfpD)CkFfV zpTx`%$!A`U#F5ZBz7o>ql_(r=f(cG=5>H|hhoFfQQBY7&P*Av_prD|jprE*)?_O&? 
zkSUJzHuuhG82_GUKYOpe_u6Z(|0{sRjS}~S$0*KQXl^!8IJN-Jm= zm!73+a`JqW3)=;|lVCFZ*Pto8X?H7-vaWfS!?~A0P)$<)5ci{{-!@Q+qILP4MSJkL zrTA=nn1lcw^4*3kqW?ARZOU{5O)kh078AznUH&T56F6b7; zRw?Az_`AI8#kF%~7P3RUl{Hl#>mK&V!RDNqJN`?>&=SpDijx}VD{GBs+9bIXD8XCX z?+Q^RuF%?jQOOoH2y5aq=Eq^%>ur=pt}`DKG`;V4{>ufC6*srk256aa;htS?DcFddZhp!%wrha1RPW&$Dvyu?vGJDFGreyB5)79R{ z8-=<6?1L02|88fhxqmOC1q6J`CgYhr0i`mO9&_q?OprZQI*-pWpj+$sN^PwjR*!?szH zL+H3ICDygDnRjAm(8?+7Jyu1@b;2;iG;xKod(jc%tWrJ|sp)=pbck#6LGHS|8(Cpk zjfeSuKYE=<-T|4cRT|QzHLD4ZU_aF^Y*~#HA+f0EVlS8HGc+<1)>Oi6MNK&cFVxz~ znsSunV!f)S&?m1Bv)Wti*=}p%duUX#38@yy+M48;;8tpaw7?}|?S=fEg?1?!zV_@# ztIh9`(NQoJa#L61l)}%J3%|v3Vf8*A9Kb~-8)_p_mLI};;Fae){fTglIVvwbzyEygj%^wpiaf#@usdc{!pCj7*{eqjB+4U3aIaDp1F zz{0_dn3DKfnpA}$Ou;;kWs{Fe%DrPbF_e4CkUH)giMi{zPk*Sd8y7Ag1pfB|S^9i@ zwAotoiR_?Y83jiY9^xDAbO3NZh`Z|Xok;+&o&f)(x5o>T6F24UqokwD@6*wT)E6Qy zSbc1!TJN)kETCQzs>-C2C{Vmcdk!77xmaP$v`E$S)t96}5J4%~30BkbcY-6K!!0n<~d&?hDd26A2F+ajlxE+{IjizwU=+sOv9wBVPMXgv{fwNBa-j zt$w5I^)#>`D3xWOr zXNT;XrwiHH)Atvum_410kzYq#0d)xyfw{Qh)jI_2tu*BgPygy!o*$P4hU7N-T5KYH z_|wTDDT%oRVB%TuDjSCzQY2?{y!R01C!+nq8achRJY5KBKjx3trwbwN$EZtUo`6D3 z#W&XO;~}wi_;Xx(B=YTz^>K#4ZI+!rTcv}recq+2ksY*de?c%xi}h9uD;CHmtT zxQXq zlvtjv2K<=!b?+l8l#G742^&W+)>bnT;i#Y^y7extf(*K^%TCa0K%8`1Pr812vP zrXvlM+HPg%qer^zb}k_@$v3tXGa~gYH>}hLnTF?%x8tsmAIsgGmGg|u>Rv8BwsL6R zeTP{UVC+FGowQwYdSOxRp~FBB87TQEAs_A;lM}x^Qw|>}*lWw4iPL_F;(xj{mOoRm zAzY>_o=HtXPf0zn@)_qW4ya_>s_cgZrmLSxl2b)rvx{Pu);!}Rr|_e?C&7lb*)2y{ ziEC?~iCyd&<7!oA!`k@DE!Hju)g@?c^6jjD#*g_EoF^zu?FYtPfU{}v9^kQvus#AK z4LNSCpu|IjrTVJq^Axh!l1SuMt3r^OU75Z-2V6or$6rA!E8}ngiu=2(a1AF_q#qK7o?3P(A~`TnE$|A zk%^7R$Ayb#26B!(?hOX0Y4VYs$aSD%C|248u&Y}|opp95s^Lo!E>hloVW$E>MMKWm z=^ULf96J+Bp_!2H00P3jUG!^{)vR!sw5jNpN)KG6USfPYp1F68^j+WDj3a6(3OMUM zj1qRn)aG*AC7umI)#l@KQ5z|v6qT{yupJuE`MBzR!Fu)EVvY~(9iEg?{WH$|WRZcI`&Y1+5*V;bsoZ9!j8-RDR0`CYXZq61Q`(BJ8`GY$<>_ zLOIDhiLjBN>#lDv!UYAmkJ>$NN}<{=yYC|(RQl-=dyq?ikPwN72^8|;tUbzCstjPy zmU|Ivmo0m?kO6Gu%wzepalj$Xilb8q-il`nSs^i$=kBqU&jwjR3GsogicdTUTA3H; 
zZS}K(j>=<8cuD?bp(#AKHolN>Dklgj32UAWs*x41tTsOa>6lraf3!G>+mzJ62f*xl z2fetUT0`{eHVsFy#(c&Ymo*ZiU{f{vWf0Q+3Y&d7KCbJu7M~m^?*|sG)n8E3)7lb3 z^mSgDy4{CY+HbTR32yJmh;sZUXzxImP1{U7(;cEDD&kaT9nzv= zwYda)_fDKTs@4kK7lkJ8PCP5pFqk2w`PJ%~+rg2FZ84sCs$Y$SV3OP4jxu-HuIGaF zjzThT_^+RFK7^{=^!K`M?NSzx+CPRr2I&29C6&^Ua@7h zguqA@l5wDmS3*uZ)|mKkOrtO*TWj`4|c`Y z)~2w6NlM|}HMQjuZrN?Mk2^x4?UzHHf4OB(ht<~>61F&bkf7IwYzu8w9E@_uG}Z=n zK@KAC1QfU_TR|D<5iWyF-?il|4JyH0i}(K0EA`fz5Rc%}`GK|hV9K_7)Y|hMYDc8( zj_8$hguv_`&U<~CIImZ5dF+f9MJA9Cga_-seD;qLkk7gP&bn*;#4APZWr)%DaF-t5 z(Ce%6`yJLFZ3<^0RsoVNerdr59s0rEK^w_kI@pKD+E@&wutpR$fP~XVZG|V;uZmmh zcrWv z&RvFE$cGZiobegsFNsakyau7LQc0u=;>laAO@d_$ke2Z&Z| z%$@2Pd5S?g^?f%`Stgo$*@^Nsii5kK4^(|uEgnP<$}ovu91iGvdzc6aKj!g38M5~% zkV|=h*9DnC?>TDA)+OgJM{gJkTfXked&{|duU4~ST^x3-a32V$mFvpkBV7k9#X6@Y z4v(q@5TdrO5Q9Jc(bmLUUG&Osp;XUn9gsp}zO|a)42wTWA@Ne`)mGmsPgGHHT3tMI z8>{rdGge=dBO2|#?!bhOHXBeM84?S(pZwi@_IPi_N zRO2Z_jl|)UkHqFUDh5=iz^G0lWMKQn{Ify-uC*Mtb58j{i6 zJ#fI?@x5CNit^ifeW?zzMVy8HSSo3SMX_8`Avv}UR&S&s7Ipa|DB4KXE7|j*Ys#TN zRz3TvI7>tc69GQLpF%gmiReKZ=d+CT1zLlXKCRKB0rkIs z{u}busl+>&aMYjcg8Xx1+L@}G!UVyN{%Diyf&?kk0F$2bb=%Ww(+LF~DUF}aWK-*M6m4_bfN9!THv^<`7>8elRj*2e~7Pb|(~>qAT_5tq}& z?{QT;<35K+aB97JeQX(?l3Z+B)ivuw@)@+X>w^{H)RYZc=nA6Oh!j(I*!M)lB0?RP!#HkSaOWiFdiq@2V+G0&zv9y z_D=ZRi86wJJCzFoKW?WJeTV~>#_f!cBKbr`Sc+HPWI|K4%c?xFU{enI<{dU2Z$$=) zH*|rc`r^kTs3|?n?D}6_9NS6-#C$Jb0Oq2||M_@UZs{`T&ut;Mslq1QkC$XLoX9n_KO!|BW^R=Hf9jz`+<-}em`pWC(h@qwc%*;bPu!$A_`{#?p7^57Az zq_#jjllns305w{zKEK-e`?fY~h!!Qsq9P*rsxfCknG?BXHsu$a6@k~BK#-4x8MI^v z-22nqvkF{n^$);`d8CQ9=gQe?@38jg3b~ZK@KNi?ez*|XmA&4oZKHKoJ;FE^pueedXo$=q8}yM$ScRL_NH$iABe_M|SO82)BkWVh z0sxUasmsaJY2)4jR?jKSq0t;x7S{Ge_I(6v)ycpM7j!(=iD&+vc&^a&&AWEmspo3aoP3$?e;ws%LWl_8^4m=klg14pT0m&qTwt zH!JhwtS{=~d!X!Wp6&ilwLv9WabY5~Swy)f#*h_#(C+0t(d!Y7bw9RT{Kl8| zAomMOgex4y*aMo(myLI)JxaXTgI9pn`RNpQ4Jg0JsI;suF8-c_NQlL9vOJnardr8) zwxX__8ocTyfz$>4kdh1AslQY9i7{JUmy}0oM7wQGHD<9>mt|X97pOO^0lB7YvJqYd 
z&~ZE)uUKt7ljQ#i2LOGW)x|UYLL6TnSwokfOz0eZw(0qy_dL?1eok%-F?#wpt{m$RV`@;5C{dlEiBY{c$#AdQRo`|@a*A+^L^PbH` z%Hn3#S7TkuQ+7)bK&e z3UOH-y~#uNYg<$G=68@H*w^M)#hF{Sl^?C?;x3)H2?q$p zI6Apg6+lXSUz}0xbUfqabM{J!cRSL46UJwt3VvEXtx@DsGH>`hvX~T)`;Hfqk&t}t?1b;#-dqd> zOY3ytFcMRS$MyxNM8ZPByAxD*8DNaw7YeOk_m1^@Cvupi!fK2qla8}(8J;qdYt5-e zZ-wtgE@pAYbT6u7)s{~|5yx{2P^v-lSm1MZ{E26>iHp*o@MSqb0M*Id4NjesOUXNP z!gqj>Rj7pnd!!WjJROZCsLHlA$L(?^!8|gGe2Z_{q&HfejQC1<`cLIjbtwJXbgaFu z0Gsh29rAIu+3ZYtLl5KDKNriBd0JJ{N7}3JY`$8!Qf(M+_H&R)_tb*#X3^A5J!My` z1yEgZQGdfBz2Q4}w99U0mq4Nz=F7ly>xH-- z_e<%FCq`mGDa<&4FY}Ur-%i$W|Led(yBqr_VgK)T&->{6OQ??SSBOa17~?!9A`cS2 zl9N*AMek6&ATjI5hb&|gFVXh>+}_`^{J=dJO@LX&p)pcVDk zrOuH2V}y&%RX5ITtAJ9Af3o4Fm#y{1Lj1(ULM0sXzJ81W?GE{WA#a58By~hLg<$~m zB%V*EJ%K53%N@B`${1ryBeE1giy2? zpUZ5m;HVi~f3Lk*DD_C6UC*Jwd=hk3yQ3=f7*6sxP6g<^?N;o*dxhz;%Y=ZptA4%$ zyrmdZ29ohYrK!N|FmX5G4*p_XYVs*4rGg>;_k4-C2&CA^{!nxMXb&nlignN)=7bL@ z%_x9A@?GkqqL|ee?Ea{?4+ef&eaO;eV!mL@>*LsND=x#tz1EjwyGP91%6i9ax7&Nv zhwt60`d~ApZTlTFYiGE_vE&95AjB989d%;edNR zY<1bX=!i-9G%P<_cYRRft(3EZN6FPgu}wg2zC1FZ#T<;@p7gXFsg_i(=a4#R$XI^% zXIvpi>{#{*egEFZV=vK2*ztUWu5Hjxl=6?OJ^&Fb#|C9f*2f1aWgw_rAFekVJ zTz(XTx=pL7@?2ix-^<+EwxJNH?Q(2gwxN)rvP@nTbNL1*Ll^#W4#Fb4!1uNyo=L{c z409{<8TZ@eqIsORRT~PK#25JAyKMD_I3z=OeGl0he@CGZNa)%PK^NtM##YPM$W7ig z8wvt%c&X89{R@>J=&`ySuEe9eF0wv)Nj*C(2~(7xVZ(+X(Z!#u(fj=Ptd$PMU$%7}-rwD2tKuoWhSr&j@Q;!%_z6wOzG&_7W2!fg1Da1_{rdIR>l-MHwf>mt1>pP22CEO@ z8901Duz4fVLMqEEHkPn+N1`#YWARpjH#0WAAq9PjRWk5i;@`)&&hZU}q;AGLB>NHH zPOgiS-jB)xfPxdbB9LR;+L-P)cFHF(fST;IPXG%O%#L@I&SN&26T)wbItM=BcHGST zn|>;9+e|L*2S?=TGwZJwC*Q-!>qxVw?-0J&^Zr6r4s9W(rmI*t1nh=05srR#H9*$& zjE05ddEOR%9OUK^4w_xt@azAsEABTwc28b+c0D?gF8XWk@Sl*YZalFTguF8_iHvgs z2ZoL!^xTRj&^&2<$%8}5*lH`}MhF9d$}9A0rAFbBckz_VYj(%yeLyQ>cQ?fKO!C}A zM=(`k7mguZ#jbiL-`IU$9%W~@2mZd)gv=27hw)AElmwe*VS69N_MB&Kqz!thoHH1% z0nX1d{{@(HaQe$%Dr5$h_y4gKFU5K1s{4}nj}thp^j5INWI}(*NsrTZSz5lb)%i>j z*L;bOxUGq2u0hp?E!f(Z5_91>fz|7B=sfD?o417^hH2YM+~-)qe;6=K%TtCp97O+?uL 
zP-f581c=l9rGVu4wPhnR1(ZhZFa28kpJ1x2!>4fcFg)wt3Q#bj^QFXy9#6#8ZTk}T zgI#!@Sa-mKvo%Lw!s12i%`RFHjQXPi{s%~%xD~S#gFZpkkRFNGTztPM&ix{n@K`+4 z>3q8g9ZVPb>qerm_7UUdr857~xZDn#fIr6Kc+qJr==n-jT) z2Nbf*eSXTZl1ijq1YMmD90g1fpX>l$Kpy%fsz2NBQR%L!><$#CI7GKi`)jy++3;o@ zmXDve*_>Co4@xTiq=LQWz?85JmZ!Mi-FB=Y$?G9o z%2A-|b|-&yK(z11G<2&ddTN1FbuSTq7+w^e(EaQk>WUTcH7^Gt z874O~?kV1@%6me2>*51>P?AXpsy1!)-ePf*ne}2#Q zMUBV>6_#xx;rjzVk*8jcLnd{0QN^FGrlfmcmjz_u@iTPo@_72hCIhJ)Irz6^r;)BhzU7OPo4f3qFXXyE+Vvc42lCsE_)M}7?$$^m7wl#ZhEDQxyOo=? z=df;t+x~KKa+A!8OR)w@^bIScXoYesPTQT|td84;6mBqWcVkE_8^zqH6zlgKx@Z4n z_p|>}OyxoDj_#;(+8$P4DLbOM}8B51yLF9;qwT3`N@ zIxvqxdPV$F_VLTMvcb>2Tn@gXEBQ*V%C9#MbW=^C`g(yWt!XGYt%`1I8_JQR^7db? zCb~g5DbY1r+}eal$(ri2PYwnwMl_e3&3E8Gq{JE;oTj-T4EF;6QV`A>qxE^OH1Q8~ zgVt0HNVH5jjbB@H^dLn3%)iyYAazT&miW!y%m1L&!6#F|khN7KklU#ogmc#J?+0aa z>ByL2U<&Ai0`oCj(} zrS&%iX+^RC9|s%a5*R*=KWndzL}x4qOo>}y$8rEp6wD^(oKNjoJkxMW8m{b=<4-(O z-AxHl$Fu*DUQiGh6W{7SM7Wa;&SA=^C@M`cXFmGcv!f9PJP~s!ax8O2e^*WzR_@rHS zHfJd7JYz{`KbL(E<5I}B&Sx9Quvqgc$nZin3hr#8IG6{n<`Llf|Hc-5N`vI(=&@_r zyku+f;vd(exvIfq1IiYv8bOD054~9p467)7<0Rg4e1n8-wA(%evIS5sRULSU&QiaH% zDRr;C60`!c^C`r^*_K;Dz;8`$~{N ze0QxbfK{Y+@#Wyd`fMOwV#6zOoVN9#*+TQ1@DCH*M8aQd${wUt16+9*HOI5_h*BsP z@l>Lqy#vrqM)mBHjD;!+(3U+&YUM7)h0)p*hJ&yS9nnA@on&w*^Ioc*Rq(8yiisclaz(M~&HSAy@x=O0nksMti zsjtbkf*h?D#8D+Yzm^vfihc`-$8}LTd1+ zjV*?(U=W;mVlMLexJW9$Q#qH7WZF3G*nyUouzr6bq=SCElE#IBkB;;YK*y${uM)xl zjZpQ$>Fi5v?MyY8d>%^>jLil(aK|t}a|vp-2bVrt-+ z{3PqK#qcfCxWw&oI(CGVz z`JZSd@$L-kwf~f^d@1++Fji*p;7+Wa>agC;d2-)Ud^&;S&O%2LC-4*4{09MwBnnp^ z;c&+x_}U)&HbhUdv*P*K^eNbOQ1|c1K7}y-Iq{aW*T3!uq z0qj@GHVAsFH&A+gsBha7(3Rp7pl5ru;H+TH=OuO)|FUJbok_@%V0&ybCpFZw-==cVQsbJsCN^Dx`Mu5f-vkPMXtVxz$;&b4 zT^t-@cWgd4UOFh|x$x?*{~IbO2s4Q`ouSbc1#@>bM;;#Ownay~_uK4ReBenoA(r?P z`+>dgbJ$2M#tk2SP=4Mw1H}KJ&s#aSm+CjXZntyE!2yiF9bdZ+BpC6qFC(O9=W8VZc_C>-Kh=IT<; z+=l8aglTJhEf!b0Tt$vG`J@k1=`lWbpAFy3mRN`|XmEGW2`!+Xt*-^*%Ym_kKDOoH zLmPkI(*GIQ1iCezgSdVUZOrrMx@TyoWgUhX8159QHeuUJ?#hE 
z=ij%=2-zJ|L!-ykdVOn%j=?0~|60%@ii-6r={#wKk;OKipOi2~ISw56mD_`LHa=z-;UJ|lnzoZwhj=YB1DQD{yhAQB&cmr_AVZR~ zgHv|;wN!q=JV5Kt#PaI~mGG$4KfW=OJ~&Q7Q~3?mf|T}hI(Jz9W}?9}3Awa)T%3V{yi{~orOIIu7 zjZVvKp(5z?n8 zG#0|RW0%!>TcUHw2!@G#7*B}0MnAFAMTnA#vy?$)fdwX5!RnvrU)nnHkPZLzOvI}( zo;g~%>u*zIp?-#`D~n80xPv#|;m1ZiMa;MlQ1j8EBC;%0J$ z(*Ag~SKcheh^XSniN;@*SMo{o<^TF`I~gM=YtFPy_%CduDqwLcMsO~n82IdTLiCTX z(I1=%WGLz8fI;Cy{-#1IhG8v|=X7H^V^RN}Rj0A63hP?sajw%&*HIG7Mf_KIHqNCdf5W#D?Ie#9- zTdLcxS0fTc(<2t*Mz!2CC0@8B8H#7;L8eN8sUZ7~b9p<@a=A;DAwt5VZq%j5U@bP` zj&sL<>@PJP?k4D)qzt>4p|d@v;br8>*dt#SFWMuuP0FwW~_KUu-}9HA!jS|$>6|8gv=9{y6W|E zeBZ#EVYPpZo%nx3Dc8L2)JaiNI<9D5Y^@K4PdJ(m|8T>r^;Vm`4#~k}h;iy(kA>PO z{c8403KLx7vtM+J=?TrSdNFDP;Ibxj&0gT%m5Rh-n_BjN^ zOv=%_`*pt;+z~(--bgK4gyG)!*7c53%}^62o{4^Q?9o*crRP9w7oE2eUs%*_yKF3% zi=vvipXF5H9F(Nd#{D%}L;Csfcs4#HzWjv$VuXvRf=&jEg`NYQYEC&>oyc~WNWZsJ zIoonOC{y8Ab~*vt@^5w~@eNi?I*okQbe zy}e8_LpGP~OL^8}1DcQb#lX7|*rrSUYQPAWR!$VY+qPKE;uH4lTD8VX8{+!F>yD#G zaKyinla!3l@e>-|%x5SVy*XNVYV}&5FoZm3?QZ7)3l5pA` zhijNfRPa~w3FnJ+$!|EJ(J=G2CI<~`pOUk#^%kTalg+#)f9>qSPpI~dAP@UV#!>f19A+$s zMZ-{E_4XxJAYDpR*6@a(H=UqMJd^s*d6&lgyzdBsp5#Yq%Fh3lJE!@LLL7_js3l;= z`&an_ViYag#lrN?T3a=%DP+C&HEybTH+C0;9x0bH4XDk)uqh##BL4Pj9vG#$!Q2{6^K&ITm3t zJ}6=##8ZhBujO|Bfir*ug9m#{5KfNrbKHp*;@TA$$goveAY5UibK zOn$SVQP*If)nzwj>7@Q3&qzEgR_M5p8?v2PF0e@{yc!)MX+aU=t*L4vR}x|TtTpG< z;C6ZjmBOecS}Bp^94A8Blh%Nn0!Scfo5Q(-tRvo5dXpm*ZYyf$j_P}*HzQF1R{p)d znq3FQ1$0IO-7UncXkNB2o)rv)PsgHd-To0=FKg3V^(yO~j?9$xzgegrmG!Wzcw@4! 
zLxxA*bTV2z0zf7&a;N-4W4Y}Z>0h>ExgCY=9e)C@?;#Q}F?QTH0wunk$n6~*ke%sd zv@WT5nC2Dq&J!_Ba0C%08hFf#R(sk`=Z-@s<)nNj+Q_Zv`!cIpn~Z1YNz^_=$CARQ z@|hy13V-~QO*`=G*Z=9SAJ^GTw1{+K(B&&Oo67+1DTGrMnuLclHkT8}j7_adpQQ4q zFT@Lp=`(gUr;JsYQ?i)QZ!Yzg?An{bFo+`rQl)nG6#NKLa4y%YJ=wn9Zuk$3Q5@x) zzH~cx7?ELov9w;OMw(Jg`J}jL zC^^E~Y=-^^A}dd@nrK$24acYi@-_wIA>S|Af$DrVI4Jg0yrjPRMCuY3Y8!Giav^>L zxxU0|HRh~_|7}z{ho&4oa%h3^%|3^p{~IcUmYB3d;tE60huYdysNCjl))vbuV)eDP z$Dle;7a^$%5$k9Qx=d6N>Cq7b_eQ_+5tcjWWAj{R^ro?dpY|6Uhlb!gW)c9o)H2u=Zj{-aqiUkYjWQQY?G6Mgrc!JV z=vL-^enSI?Y$2EGL!~RZ+VrcJ^MJ)YQYwxJ;TC;#(2v!>mgv~u2V0Cw>-9vWqR7M| zj@wwaVe%y@!Oa|nr1TWJjs6GOd~9A>yt?2bPJ*f z$vNV(t*+WlF+MkjBKzKo^Pm_K0%EcWI3R|Y%so~ULo44ZiHhDT1PNXUit$#E5Q^+6 zXv^x~auVp$zmh|G^_x_9HslBJ1rX2?YmD(^7@cLjA5ic#B}gS{gEjk4jDQ4Q5#N_n zKiJwZ#b^p4nKG?%R$gH(yTx|VCGkVd;66PNn&7UFf9z&HE1Z$|qglJ zQ^|4k?JN8xjz**0NGtPmWrv1O`4*C}>h0K=&Alj=DL!(C3=T z%2&Q!=1pxI^fWcmKn`~DO{bcQ)xQ1f3;!uSzpMaOmp#Ems;sUY#Ore?DtlT(%vH4J zk~O~Vi`-^S{#=KTU1@$hjz1CAN|)V|UA&0mxApDd!;1Gx@evA2-#q=`T;v-|(0A?cf|We>b`Yo%xT$c{@%#u; z4)#MXly>~>a?Zq=pUBtfNb({)8P7`EBxWp=)kGrCcCsUo{G03PR6fEfiQ*RO?eyF3 zhW1YpY9@uet^vb1m#^+51=3+{m ztv-{e>o(Up-;$C7mf|o~i|N)XkOD>4<}=i$3hCirtBGfXP@~EdjU(Ut zI|Z|ckl*Xx@pbWVuX1BBJy~bOA$?}`KCj{)Vh!&U0#o~pHF~e`j*8x=cZbEDEp;cb3^_^h84wclrZN5N@lY3gM{T(MtQYBJj->dVBf@Im?~ZIw2L>zIQ^yS_MPVq&xa|j?`^U#hPQi36$>SL)Py= z^-+?{24ks;gAm~S1mW?&olK47>MP=c(?wk>8_W5l{>O)wY|jqt&}dlq$E!sncd_OO zi+#N6=n`zTdpw_TK(MUf10V0TPeR785U(eye!edyWo7L=<=?<@;koAf_z|u)J$5D+ zkz*>YE7HTHFB%Mc&;AjciZ5{zwCQR&@iDcTgbhN8ItV5OjY>RIY%_QRJX1Co&y=I^ z3dkUB@qB&@ii@+Kg}q+L^+vbL)UNu#KmR*NaxvGg7a2&oMXvcAa4|rrxb8o;AC~jl zjdy~elUCU?!rn}zI7~eskn>h{hL(ncY)9>O4y-gky|$Ed;IJ-m;O5{y@5D2Rli-D+ z{><*?v)FzjjO|{IHQa;J_Vx*;njMC--SJh=_&5_VXiowJv!NHZKkz<_@$pS zYYy&4N;giyMUMTl=1{tz2RmoWn*;TcUsU#LMRTD-m=SP(S=n4Dj$A=)RdYG<8(wE+U9b~@#Cmzj%8I`IFah4R?GqSW3YjduWK%67%gXU)2eUw za|`VdKc+ho{8iEJhUTO*iokkOD^|{+HG0QK2C&y*&9)|==i=l>2|CT$ue?*}QPxcV zt|i;;SL|JD0vuWPNDA*WQYmaVRM|74h9pMGNwy(c*NpC{jRC3eDJ=Sj42A=aDpu3DG8f58adna!fS`t#%g#X_=;T 
z2B>=BL}wh;HgC1rd_lSsnEkn&2(}lg%YWh?mG)I7FqGwOKEQ;KKxG#ypudN1TumH> z-}(?Y-^FSY5FC2Z!e48SdrA3WMM17t9e$1saU(w|nzZ6^X*XluQo72Hf1zk6l(+Mi z?~#fSpm|9QAhE42Ia(D9+#R1QN#O6sSdzgAj`8PP`|ZAeN(o?ujt9{QtpL2nL+=QO zuYa>gfslx?w$Z5p`OkMKcR#oDks29QaS;1?g@1;mE6*?3XhKW>5uIW-##;n>|~wHt$QkOhi_ zax6=oe>yz+q18v9()sF`??a~ z1wNId+AM0qeH(wRDbCp?S(zk`AJu{MwQQbRJDK!c{)K8v_})%8JGy{n2Qe z4U;Ysv$G8b>s5L)bWkw zlKA;=9HSGyQgC2sQ&0N7>>xAw#BX4hYV@p~dZN=G?R2zo({IUlXlLS?`x+J%mrYhZ zDJi+jEmIr)W?&bw;%NCrvlh_hi9j#dbWB{oq~!Xsc;!ginyE&>4UkoqEqOLtYXfxy z$+GQ~cdi;6a|8=0vOgcQ&`G>x3qEB@#aE*-`d!f5udqc&?`~3%9^~-ds0G6 z8GoZ1NW8nJ^}FfIj8HRRw{j;PEl^r0eoIA7Zs*>RpNFgmOFrPvEq2Et>!E_hUGItf zydT=V+-dG-yie?Yf&y1p3Y$GhP$WmQ8ec;mAI3AC8ftnW>YlSlKGIPr%BCa&=rte$ z^66d{&!m}?N0ee#mv3^KQpiL>QKgoUo9~Kft^Oq%DoAkUrsP`wh9P{_rb4Hls(Z>- zZ~D#V7YAA=P-1srOg0s4C5wTsJX^b|U@LfyRVSfB69itMhnct9guc{ml7@ynjNXOH z;Ps=nsE;O6EQ<`Pv|2+pQJq-g?>40hs4DK+R80y$A*Neo8wUoy74z_>Pyp>8CF(zB zOEw@l+_v~7U%F4NJvst5_&nj~H`d`D!F`>zy_V=&F zeG@0-2@jw7_{J%@WBo!bg!A;S`9T?gpnKH^9z0}=K5!QW1fUeriO80Ggh@vk z$rD>r_}=hOHd8K$%xHc(Ho*Md^5>)g7~OBTbEyvyK!E_Xl)qK1oISqd-wl8WcYTXT z<+yOqAJ^1$+Wi2gvJygFAH?D+;`XdPj9;X@;#fS2*~SX}@?Q%n)ET$UDVeAs_xWG` zh6K87bHUt!!K1E=UcT8iBGeY5%C^5;oRr4=06LH80IzAnaODs?XRG3w2t8qgVqtCd zW+xoVD)X(a@hs>5PwxQ#)x@e&}iDJ#l1AcQM zGNi~ZHMSZacMJ(s!|$!pM_!zKm*s8pA0^+CHT%a3IwU!twIslWBWsOcpAqu#jkRt5 zwNXmoCu&3fubsVr`_|e6FnOH*1QyF%wPSPe4pfyY%Ei6jNP@g9IfD{NJ0H&o0`9JJ z`{J4Ar}Qim(7SUko0WXidZP)ruTum4x@`S1BT0Ba*`Tk)#*XdOlo-i1IzYnNF>gou zijkfkJ5~X}ZNsV~fZ2Ej!1u+tiqh4N$1|b8Sw@t0;Y0-mG+z6R=J;ej*{(y||?A$DqZl z3B*-I(t`w~%-`|vVIuRNai5h#*`sQva*9$towH^wDf65sGte^swK&-->;CeVxP}P^ zu@x<4BAc<3t!#06g=$j%kzC)9o0h^$P#Y4DR{y(en55!syhjd`Cbc%cWTgr}sfd}H zc;@;IX9jA0uyrriT3t&VNrJULloVL+?GfG-XDjCwV*VY{fwFRqEy0r?>KjtHfAmlM zG?KeGg)7$V{Y#W;i3N}z7$>xBjMnjMZ752jHgCLZH@;}m_#B3|1?z~$@+Sr93TfNh z5<&5mg47eqcX}sB$zZTAK6AZEvD{gA^-R`E?caIpZ3)KdAPHHyrI=@bj884$U;bu; z@#|BJ2zagLLaA7-7JJNJlm1TZ@39!6Bx_>RHwq94*7 zb)o`L(Ocyfx05lxECm;B!uLQW3du=Sb;O~eIc23em8x%Wx`JvHR5=sv@@UQZ5v1K@ 
z1+UVqpk)9(r{b9h7&xj-JF*p5>KX4FR$+*GO0!jKbR5iB{L)-JE3g2OTyf`f6_A6m z%;9h1dquplUCnox+KT~d(Pw$2sDZD=0$|_b#=RcDM35LOa7+ufXL@ltU0(Y@G? zE{Z1FeIK8KKYQ#!G?tHnJnFOr4;`j0|2Arm{7)Pr&9?YC7hCb~1-#3)Y>U$iad1D` z^6J+-x95tg{ohLle*U$UTM7*!;WXFzs%TK4W>)x2+UhNFOORZ|&E9LgQ&L|lYvEe& zfV>+Ak6O)^$M`9IIrl^G*|qWJJhwTROU8{KK^kFhoezp}CCYTcFx6L(3eR=)5nHwd zp}{{_?oigaC2SoKlz;O4yn=Gw-hSM%s#h{s-G{p<^9C%_0}KBgO~&&IScL`)&Z62_qNaBD>bV< z;;}pXf$5vpE^_dDeNl^L)cSJ*%5I0Id$3wcrG$9G|J+7=c%JFbA9G+;T-lDrn`wM+ zzO(UMq^+6E@A#H-dkkPZIT7$k?<~z98c#bJ&-_?AFEFvCknynI+No%wW%~)V6@z-( zM+NRt>QbF?corvjZ?VZOK|4{O783?}o^zTbA3;j-e6Kp36Ly1)KiNz?lgEe1@C@wE zY&^qMTb$43TruYZ7vCMy4MhvN46(29Gq{yhy`!fVD*_f=^%=po69I{PN&YNpTkM)I z8upV>%F{|acKtClV3JnlQ@atH<2R_DNe3xBznQIBQMuPCVZ*K1mQOKPp0nH0LbxQK zpzqnLr7cAO{Oe!sPKBc~#eK%xasfhk?#6endvYDbz35r_{C}|f-pl6g_P{rqq~rF` z|Ln&%@KLlr|BrL*wrwpm1e1VlWy`k4`RN-T9_9Qj-6p`U^RV(A! z1x~zJ&ccOc&-=UfAGOu_3*G-qwq|RgL<2Hr*joRwI7x*btJzvekUKMRdGuOs_Fam1 z1s&O1R*H0#_1W*AM-LN)7j4Um1Fy2VHg0uvhtxEAV=xx|qcwXcn+L2VA!y^8Y28|I zeYe;d#oWa=D90fL?G7HuCUYZ-SL@hX$OJKN9k$np>V-CSdgt2)Y@c`DRpjCB_;H9_ z0oI$V&5cjKKi?>qapB3AH z@@PEffJYL_^y5_vS#v$N-wB6YnR-vgYegt|zUx_=aIj01pq;83C<(C)py%y$PG$H5 zxO8|4|A0~lxExRB9+47Zz^1CtFUctY67q>nZ_T^JvusLV>N`DC0ZU~QTEk!2tOMJP zGv8di_8Zu_kokCq(--i$V6l+!9qA%<$47QGLHc_67fpGc9%U!+ z%8L5BfZL00^OKJES)n*8=DjSQ$(}7~Oio(M{TXyqH7te;C%j@?In6j%g7_=9#nr6} zx1;z2uG$s{q;{RH-d4`y?w&zgv#k&wj)ia`gJp9&byju#3aS_%gd}7JC zuch0q%LW}JAgbTyC?rhCeAdOdXARqat@}@Us9B?Tt>oh$Skty(K8RQt#THg9mdy@7 z%knL^(c3h;2dr(_0&;hEx%GIqsr-o`ORXZK|OsBh?@$+a5=^4ZgzY*3r}9}zbLTsJIw666;c`m)62~+7&QDg0KnRgo zK+!xHj9HtC311+0*57S9ekt4AjLqcq51_duz&8giYJA=I=WVWfMi$rt9BG%UXBSX< zczU>nc*bocJqkSjtX)l@ibrs>!@+Cf|1>cmTvh951$|BCp~C3~j|MXhZsqqPSKdZO`Ft#MX+ zj^GpzE4yAYHxgzqGcs;O_6YSWRm=GZC3&wf^G0Emqf>9PkiP z6UB42-rFiKpdyYMT4Os&BINb38e0odRsJv}ooqc)gpKBEZl2l3&5M$$TPgq*`z@Yj z!CG5`0KeK`ZSiG^UWFvMr?ONA?X9VNItMr35$#Zbizby)B?=A1j8mGdK6RMrW_HK{<|c+5v@bJCbp_L&N!W!2-8ShMvvspg zIpU$|@GMc&F7X1BD zdHi2ZgvX?)Ac)2M_V+vOTC4@D1v?OzN>MjmPh8!^iM^3C9HFF+-Skr`=e}Dpw)98S 
zjI994+pXnlE$Q7kU>rK6kR=}!FJt`fs|iu_j%Aib>TWy}gD3G*{L8(Zn7q~tf&bR- z`*w6wXp_tgsO%o(>^6J-golCipMl?>*dw1sDZyj5y-=57rm$>#s#(5;+gcvKC{Tey ze8u)Sx5PbvG%y6kTMEdvX!@jS#6(Sf@47n_S7KZ0&Xzi*N%u zU^O}HFaM78Si3#$V+^-cAg$Z(6wj@1T%w%w`grEzs0RZ!L?;yLW#JmD7EU+Fkz-BU z3v~d`x~w@`E2QqCwN&5g5^D8d7@J3X`mHU#`T!hhcc=*w=sIK_-t0h6LFm2JfS2Jk zJ?BAZ&g9)v26|sKK@i3WCvlkiSGlT0^$>dt6WQzI;WW3$`r`{(d#=za|Jq={ihzJJ zcEnr%{NGs9F^7(73asg|?S-Dh^#2`R{j7~g!{b~!$17M>2vOLiijRr+v*sgv6a zz2CCSCMuwI+o|en*QQf5z)t5(Nhn05$eCOxr3{&j_F?^i1;O_{p2>zn zDFJ-6-{Zz@vyNm3dnia#4Gaw{lVc0$ zq^@oEOIHdZ(Rc-RmpE3t?w!ePwcBpwFe4aoZ&tvUO7RMXvs>QaAX$*`agPS_vXKt$ zTUh+1c;=4PTt{%Rcj8&bT}kbK*E>3d(@&S(OL!w%aMwxD1PCQFQWV!?A^F*yU+zKd)LoaxfOi&w)|aR5nURtNyx3`L-YqodSsJL1yK&VNHpuRtJ` z-43x1apjD7FTy}5z}UON;kEU29U8@+8@&eOBK5wF=WhI->)?4Uzj5h zLmK>(KEl9(qY8eTsCv}3r)m_J>{P%&9eAwgr=x|$`30b)5}2LIL3Xeqli7glPp)5| z6Dm5DfJ^G;8Ur*w*(!k3EPbn2-e~T1%XeRYoOPA7u|k>~t%Iz2}5NS1s0Xq{7c^*?WQAWHIZt z<2CNDEc$}6*1Q+E%Mp={ zcx|*1yGvh%HLHoyJ|m8fAm8|6~ z<@L|SIv9ZaO(G{)&JymIU)zY!6|ZU=doM`(VU&q|T!hEoi?dZ!BXYVP&wkzRs$qM4`J11%!AZJ;bxm&?REIEiRJt6?jVH z>iWfn1dpV=au>Rq-?=meCxMZ@kp7>9#wsV3nON2aU%D~O#pjJn^OqvfjZqK25Na*a`MB& zG+EK+MQyq_tleAEz+`F9=&zJ;S?`4kZq)ZYzg~1In_mi3XwpasvGkW!Bmp z5m!^}gEeghaiI3Z%lY43a!RiYNZiUO-f)V`NLh61eFT)ghxc1UTVQ`z_lLzVt1;i= zCuvR5iL8h48e$8epb8*qj-ix)sU@H+-50jvJ8O++q;NnlsNpYJTRfAbe*uJ)r$u|4 zb2|vHFk$7kEQ${DNEXJVqs=8ZJOuappY`>xT4!6(6V8A0CoUXZxa8e`rep8=~`GB(A9LuJKhj!iNcr_gv{T$rm zxts^^%u)t{6W&J`sh>|K(A}dLNhh+U3$(*_sv5#w4XH_Gryn=MsdnI~ory+LD1Iu3 zMx@rsL+dzIVidUl}EZ)9WSkRi(Ob=%Df z?MOdWxSv!O+^Rlg-*5X~{ro?<(U;;AWi?jhvpfNHU(MK^oLtFjf~9W|t!K_>chT3~ z%b)B8Hp=_HXMG%x2f1hH@d)(K?NAaOt@Wb>hY)(Co5nwGM%lj=1>NWiS1u;A6c5v{TuL8+HUnz2i&?ocqx@^_IRs@5x+p1Z9SQD- zT?Y=>-dw)IrFQOcx|%onjP869CEJ%n^&LJuiv77ee;-7KV>5eWXxCHeUx+|F15FAO zF9Y77Pmm;ZP_&U9g`ynnvatlJRsM>{eChYYB8_{Kk&oUreZ+xy-tHCZ|{%{s{G1qwqR;6oIgRg!Wr-3fO>)GI7*M7UP=ZBL%E6VmLtPcALDM8E2uk!c}P@V+7YVja%;M&N{Ap$x0L&Le4dzLIB$28 zJPaPSd%4t#YQ7&ma|0Kp_jgdw2k{K=Nbc5fNp)cUL?VVq)qZ%6a_2#m1J-s1g&Kwd 
zBK&`6AtX3t+wz@$zC&=i4R+#rg5IG>gHA9yh-N&J~7t_gOwHsL)4jAK@peQUTw;9-5fAJ{KL zc|(G;k=(vR3Qx4gfS1S)wxnrip|m@odHvQL1L9i2UEy8;E#7>%un*Sycu|6Rilc08 zkKYuT?y-kMd-SSe+;U-`!?&O#o_UeclEI} zg2d|M0@dp+O5H~)ZW1-Z zyQ}W{Y|eIn3ET~s9N{IxoU{3OCO@o;%uPul7h())8L(TGh;9*e6ekJbfv(Y#)2RAd z1}Bf@yOs-qG7kUp>yCdozoT2f;cNW!f54;MbZq#5pu6a$pAB~%uqB@c zm4JH?s_ytc%i;HK!lOB3*}WG%N#ijESyoQT`;U7sI1e9g4{{ZYvxeUlPQD*jGg9sv zfA*;AL7A8o--UUxaItCu7a7a)vi!N%+9lRv-VwjyZ%i3C+QQ10)aZwpwqbV&#WM z5t~JU>bw`k08U7~4}gQ1q`Zb*$;M%d$5%CBLFF=SP5#|S&`(N9_>VvIQX1J>@-rpD zCTYm7AdUDnjmkJQY;6gL^zB`qVy8X7Is`{a)y#xltT1?Pdvmp9J0UK((;JlWOR|rB z`GpdkXnnhLwV)I;*6Ta*GogppAB*K$Z}*NL%yk&Rk$oh8$WAtvKiYZ@*|A;arZ_CV zCwh@jwS3qVVR$^DgmF{w(ur(?_r*8jemKu3cNKznsLv+itsgy4W`8P2!R zaW*;QtfsEbD9-W4wl_Y^;Jkzx@Q#Kk}k`nIa0*%59>*PnFS}gR z?xaVN)}LE#{Nmb$Qr1;tiAnO67L)Xy)qA@Ecdu&jSrDhhiEZ@$J+a%5*3@23(gEbP z=JrA*TTtX#i~rp{0G((}z%h16d-OTl_cmWV;_hAB-`-wM)6SjNk&7aA3Cs8i7xLcr zkmo+^BfGM=tIn8+bfDkazT66&Rfb6&(w%E?h*0|;>&;zw9}`M{w(VEk6+WyLbfu6w zi~Eg@WE;H6bUw#oG&c2@f45`p!3alesfWiAu>$IkR1i-(SB*-4~H|0CB zg%dHIb3@rG2H-5QCo|boOP|eF?Z0ef$<-tKcT8mHI&rthwh}4BU>6+RX~G zALVji@WW$ZZpA$CG=L2tF$M~~+rH^tAE+e8QgqEJ0J1v{*|tq~H$E&msLGj|w|n_a zi8ypB*z)_{8K8qf53+$w7!VRNli9-x0##luMoxPa3*bCf4wNQZuE2Pi|9a*7$xo@Eg&@RDDaSpk*QteD>YZJD_amEzw5zg}kzLNw-Gd zeo+=DE=Eu+L)-hozd2^%1LEt3z{Z8&k?XKcnGN>7ALMd)UP{h(NIJ*WWbxxZ>f2oZp)vf^MhEu}XsNN6KfyGyzt zu_?{lSWdOzyp~9+w9mnaW6|2P%!%9l-o|}tw{5fIv5CUbIXe-*@C`yyeKNn~V)*;d zN~J$#6S;6?))gtWQwbo<(G)qn}zw){wP5=da}@sV5x@Wa6VjGL=}gg@9U{$NwK zH&^LMQEGJV4jL1tE(}MzlifrS+r8Vb4ry-44V~XoGmyKsKPN}X%1>aIyd(zwYgBGj z02qk|x;y?ZxgCl><~=S>?$R`mC3c|Zk^aqd&1b)HJD1Wn`B|@Wi=K!U>EBEROFLN& zrn9Z3{@fIoZo)*4X2}9Wpn-Sb|&YC`;>xu@@uXNndX!Wcq+gCL(hJj z_N|BSpkS8wgi1qtkJ$uQ!SFU09m$(hPP(9-&1VZ4#BCve!7GwCpc=Sv>EOwS;?#iazos0?Vdtwe&1p> zdwe(0k&!lB8Jv~NQ%;1Q+q!m75UFm$YYtnTcLp~vYaXG;$OHA&0Mpz^x;wtGhCSu% zf#Mvm#yzP)B1Z5dC{0s#N=bU&H)p2@hdeI<{8P&wr=!kt>&nh-ajo%25raxu@z~9~ zZBHR)IG(myd#)#LCuE5Z5XF2dDnmj1dlPIp+pky`RjcZ(MwR$eZmWVZpV&U15DwgE 
zPr9@7Vd@oFZ*~ONAdfGxgZJnnt6{Lp~iiv!&8uXaDTefKCNf^B#ZAD{n4!5yM&)ArEY zkMxY$qa+k$E>y%e$pM> z+Cy|;hZ7TR)mQYPN;9iEf-sfD#PTa!onO5Aimiz+ih`;fABqI~Gi=Rdf+&hIlBPvF zUGEgpJz8x9x2FzjZ2 z_?6@ax7tEFG%S?7F9d?OW9YM76;Vq{XLu(D!^;|zguA&dGINxb&Aq&-5E~pZO73R~ zBx(;d%a}dLp^0CCLELkG-_vhNu~Sw&SMWi%h`*K3IcDKNe)k&z5hez(HqQldB2Rma z99Bn9j%h>d<2@}+Z+OGj`riY)iG;W=>cTK7F@phYrOL~T8ZpXj7X=f#&r+wxqXxo6-=F_A6$3{B4!Y_iKE<264Q2yDOX4SASao(qPtSkse;LEyGVKSjRHXdqu}tNJF+ zC4I9L*8W`3UE^cKwYQF_qPRACWlB2WT=kKGUd8;aIXGa1IW%eg{%e5fi5#mzZ!axb z&+x9HfrFff5ns_~+M2B%-8=o~A%7-v!+J~NI1t6s!R z^H$@dMe}wv!6lK06cop@PmIQ)Jt_!3MK~+I{R2CZ3s@2c(FhHZoy^Vu@sIz~rk^WM zT1%JBl6)Q9ouvstg$TT@WVIiKr5ZP?}$4rQXD!*kxpvrpMV1<@dBJr@%qc)m<} zb1BNsFlULVING-4U5~>AUal5Fq-lAQ?23aijL9Z;}4+?$>^Q@5~zuiihAw^O8cn58yx`~k#XG8)Mr2)9>of6i#n91GC zDoQv_r}L0v_oGaYj-+azK>Iz2GG`Gmb)hSJz8udI0h!~q;`y=GkVt z+wgo~g-3r%KJe!qSD}X}S6eP^Y!(-5qc`HI9Uk6ejrj{E3rjig0*52!w7CKkoB)_5 zE!Hw9v^=;$+t%m5Ur|C9Jkw@P@jb}oB3NpEz92{B7V=}8u@;}I!bcW_kPr}w1S>hO zta~bNI%}`yQHid(-^w%E5km=MDrBfwnoi$__I9f!P*3i#{;0{G7DE?#Z4c%|Ffx(| zS+w0qF5u%s*vQ4e%VHpF0U;|Uo{xvUIJ~0Q9<#|T!qTHgI0MMgsqCz)w$~2l?vCwO z3h5)y7gXK~MRnBS#7rK$l3YAk)J!;Wyqcw~a9x{xB1Z!)U|FI?JXuw9IRhDGZMrH` z;3LT&v7D=ha9poITC-919xM@LuKu;Tn7!|#QzCBqH=idHB{IRHggCnwvRAse?f#3o-#Ow9hTyEoOtDW7KX( zKdBWGn{X#U)~e3h-CPs0CiEiA9J71TMy!LBoS(v5-}gzlwEIDxh^Gd6(L2}+53z>S z_^1wSMV84G9FmskMYi&Vf_A|1CGz8{7vjoeS!JtV2%IrSB$HxXwdRF_A2^Pfi`iPQ zyc089WUAgduHE=7!D-1BYkdwF(O^ZjZ}0(0cv5^)8(s)Pr?`d?Pd=Iy1qN%Z;98AQ z<^p;*N!^ryDi-eM7ZPERU?MwUdLVWr2%p+kuPk?&9&38RFM<3e3HU|O+|I^m)>x6iJ99z?ls>wnfOj&ACNnlC6FSS^Il<5~jTZQSi!57NIuzSEpVj>Q_bjNQOP|n8@uKTP(Al&DP z=&F!|9Lq0||4zQ(C|&lIBFS-Eivv5AJC9YO%$djSa0Ln(Oc+Mi0Y_daSf?Ka+R^NO zNm+{&sp&&I=8M4MDmDA@7|apCav#zYIT%58#^H8$*~#b)^Mn#8^L-G=bk-UfCm090 z2W+OQ1+E38i>%mejCqQ{Lm-_PQzYp7z;O{K$ZzC)1+O$?<%^y3=^TIuUhvNdB(QAU zSIp;Z(f9YYH{R&5OJ231<7Hc_j*ImDqjovB9+F_puH;u_=rZECTKy&rD&Y{XTYyQZ%6=C=wkyRO@(HRtQjq-|mbue3M{2<)L)e zYV8HF{OOOazv|gz6yXtRgY9tymbo3;NPEhzi17q*MfTvT<3%KM?)o$l8?~8qa95MW%R;Uk=oaed2Oh|FYI`p 
z2?)Eza=oRb5e;)9S~?yq5_~)98(a7qsqLn#I#`9#PVxL^y!S5dmdz&EjE+wLXyr?r z%Qg`BdQ}uHY~ELNJ6fJxsQq1aTtYx`3t7T-ErM5Dj2=A7ng?#Xl(X&|=ZY;=3n|m3 z_r9z0)!5~{i%%j7>9;F+)Cp@CTHLGcAqCk@(&n)mdnGFGn)lz;ji8ATCH@Q2)!aHEQQ zd95)3)ti1lfA7UL61@7r?^4$;ney0+L8fentz7Y9kUEIN*^Ux8Vr7&mmWQ}|(o5N@ z7vr?bK!_w)*wVh}?2d@8k8I70DPOBe$;b0=YhNt46sN8(TbIKWPSsD-djGyO{U?86 z8}jeRN30>?8$l?#u^Kl58(QOwfx@~K1cac`FQ&>!DIQ%V$`O)s4Px{Q+x%h>3wwHd zOW^k{FP2N9aQRy+u#|cQL<*8^$`L(G9W!lyv0U!I`YUUBG0-+LVa`-*Ru$SN+gGo( zIqWPF?b>r9KO;J2M}mQ3q$rg;9SnH~)N)EtbyD{A`)V9I#A`Vi)001@-ZiDuU2c6o zlKno_%T7aB)?aj#?z93YN7AuE!cJsW1+_*cW0Ns`aaD>QAcKyns!TkIyFDGQmWi9m zvJw&3jyU)~`il-bn&8V)7*_RIuF^<9!Vfzh=U|A-b;4V<^5~v?@%ul5QH?lDZt3AL zySWfT7cSDYpEBj9Wx;2>tL=&?Je#Xq??>=jLG8ljPmod1=XCKr92bdju9`*(OOM#% zBU{LWx9JzQ7$;ijHt&|n<0&o1Y@p-&Wg<4UxUq$R1)tGO2L!*|V_ zJc+w@iRFGh2Y~_m5|l3n>_$~~3O}X7P}|MeRDYX_&ky^h_$IgFi%TM$k&`BzaMIZi z7ZksLMz~;id;&#yMevgZj(y|g+{=>#-LE%XYF8X*0qMRs8zpTXAsHU{nqjsKtMpR2 zjRSps*rg*zTk%r4^tf@2*~*s!XJAO}QutLbCC>N=(Zg!5%VrYl@g+aC&Qd_a@dP`y zxz^i|*qm!)>+)BhnA!C&B^fn>f17QH?iJhQBWs8*W!WaRM{!~;Eak?R9O+?zQt5FU zUn*E(2Pw5T#pmLA%iV#>waw8r8dMB6wp0Tt2*=i}kIYIuvaFw)5;BZt@EmW>257<; zt;K&qA?q<{jpi~MpJP`S;FjB>Oz2F8W<^7s^|;KKWbuW7200A82EpDsy~@&bH@3LL zyGlUA+Dn1%Yj_{@`*vX^BKJQGZv7*K>_DH5#DaNhM^BXJ1XVr0$Vycw<3sE=3NvjJ zF|qg1EJKszj`xwKrsP|kdMU84to5O$?Xa)&lZ4?vl5KG#BrncO1}Td=)P8TtgF-7i-gHhKX9wOAn~Bz9$Fyti#4Gd1 zSziPaOn>tBLK1EU09mvco+gj^uJQcd<#P<=6Uw5w63msW;$Owq1Y zGr_6}O9)a4$4q2#LZYo|iDj)KDal86^9OxvpIm`BXgXSUjZElm&g@~W(PIk1SRu^w9+y(`)g=2x)Br)=HJK(AovcAv+#V;Yt_ronDLo-a?>X!>f`_Zd!Sj)@hZZ*7Nt=YZg z>ktx)=vCW%orb0GYxjvRO%vwMI&w{kJlRjYS#C||%Z~4P=yl+&7X6{2m0Mgf@$HW? 
z=^a0Sg!mA0VKB-v7Kl#rh>yE8{p~-o`pcoV9aXRq7J}fuBN+GRjCP}dw8;dniEB9Z zvcDL9LyG-G9H_%N7Kjpzz_Gk&M-on+0mvwXmG9b7AHPrc_L%=IBM&=X^^E;nxqEU+ zk>Ke>^y1PGbQCb7CzqAEiS)-d?SKy9Cxz3{%MWE{(w4{;D#$XppVKy*b5!6&oAX~} zwUc0QJ~|fxCp4g)Kj$?-!|?jHkb5Lmn!DmJMkA-DBol+0i837_tXS#jGh3?aiIWf# z_{=URFiLV}SMnG8KWtZhjWDz#YC`Va_MLXUS}zQ|082t&Ss7sGAE4^pn6sNv&qI5- zld`8pyyfeKjUM>ywx3%vkl39ZWz(Z}H~%In=DmQt3XBW6bAQ?iC9LebQ?yI|`1#;R(Ax)QRKK|w#GZ!hUn4O zb|eLYD;}2n9R*K3^2jT15Sh(?EKTo0nX|zg;s3hB8af<*u^C`zJ!vzxF$Vz&;mTl* z9f{9KV2N!!Z11M*ua10qbMzr|8Kq|V5S7W6DAPlsjkw6c-I zx{@j69$HSBd+PXIugVm##Z{CyqD&I5FR7>S zVmEU-D3yqyWVfP@c2E8XsNwClm!bA2n0jc9?^F<6#8~fcM^KvzN!{Ch!0!1v$OZa- zEOJpkmuc!L#ka~HMH=Icuf&`9P`nkLG*+D`NJq&XZJS~S*j^#r+Z;9A zInSK@pqIs$e=#zv3`g7rXi1#rVK)^1d=NidwAy|vIUd`$OmxQnKhjoK21QRxqosSF?btWX@ zli33jiO9`56@Y11b=)LHJ?z6WCwMkH5;Y3l%f?7&cQm1FLzZ{U_jjn9%E$e~K*?)< z!ar=2qW zB`OanCfeXURJQ2?cXA=iBotzaBr9C>!wwDEI|vPTDXU2D#qdJ#OSuNZ>2}$#78*Yy z{@{RdCBKdh<7Ri&CykLL#MHHbQBjZIv+KD~+xc|Ee~{}CIl)a|oTcd(n(fvrffb0) zUYGgO?d+c&*kgBc^qsx3g0Q;@Z(-s59Nvqv60Dm!d=7HBAE3x>XU-nva$%fC5C}|B z?bU)3Ff|=V?pjoVw&K--P!x^FR=ygf!`@-scWhPu+{w1#f9$QLwL(MT%78Vm#;xn? 
z+K1T^B+J@YA0i>Rlx%|}?>#mZfvBy2HHeqT`xN@ZHbe_#NW9DqRf|(a@=o{`+Zbg+ z*2*_6#dBkh%q1Kk;@g;Qs#B}T(X9X-I-oMH*uZB6k}90EBn zUX4ev*nkx2ALmKIkXvFXnbAoeAn&|ZZz~@#LTt5FAWPxn&suv=U7LtW^~GRaM#Di>GD^0&u4q+ z(5rU;)j&mU`{fXhW~OQ@C9NXxUNn2nZ-So6Gld{nq}R`(GFH5njDrOB{+z6gGCd&p zib8drvQ-^VFOd~l~>*Je* zF_0p!m3<$B9wglKYvtDLY_W~6Ii@?n^=z!bQKo6P7v2B;N?^g3YmEAD!M9-_c569o z$+`zs;Z2ZrJ@d3J<*Fz`1pJq*)Go&vzyDH=LQxZ^INY4KkAmN+*&<|XNzqNo(*=p8tsEZMv6wd^RORx zE1>MIA1D%D6ei(bR7rO!#lpHDlTs|Kv-TirlpOU14S#6X8Rti2Y!T_L=!~P@Tpop$ zorztlsQ<+P;Dp=V%vwK-?apvbRuc&2N5zNSb>#X(X+=eGg{**!Q z7Zn=5 z9;ZGYi#=M3 z6W2(T4cBvPi^&%O=<{=ZbR25$rjLnrqqwtMKIRykb-NnrD`ur9A4HK`f;C-{0^SYy zU@q&-rueDU4c%X z8tp|u+7)POTh}NcHFSNy9`k@l{$KuM8y()I=}muTjnyEZN==C{@Jrj2gKWp1!#3w2 zm;`WLwsa*fOnPb&YDt!&Te|}9^$hn}lTQXWvUKN3a^ zR?|KOFE6Ta(MEEjT{TShte#!8jN61)FHY0gofUEBWLLf#L_0#uIcif5r8A?4a|BT} zTtR6Mj^w;rM~8Py@_RHZaSXmLh>xHpu~P7V+}h(&L&_S+Qu%n>iKu}Y5lq<0s*g_# zVIoV}>4g2;Ih%>^BC@6Rf(Wu%|Bg`ZjLrGFA3y>yA62D0Rniz4x17sW8AGU!vbNfO zavHGga?9{rvsl4_e?H^Ls_;^j5mttJ5}BN~rRYH_UJ)}q?S;%aWtX!y(q)&$pSCMi zuablmY-(4dj5~`AUUce*cCE_^3jNZvLRJrYExrW2qhH(gTr)@;^6(`L1?v_EOb&tk z25v^*sPd8QiirEhZh6_#GmMFY+ZLjY{RmVTAy_Z zTV7Yj$qfO3l!Xcc4hYuZAR@$4#_)~)i&cXXu}1&JA}j*xrf6Lhf}9{lCEr~2Sn~aG zKF-*dZr|63mL8{KUtNB(HT&V3$>vgiL`^6k{>JcN|81ZT@E(<$4j5Wly+*kI=u6KXmD4RV;RvAv{ z0|~$INtUkNjh^+IQir7#@&ZLwBjEs8YAxE$YO!Q_suP=wbt_?j1+I6YFT0%u(gEl) z-iiLi2AvY2#1h~2wUTwN-OF{ry#~!s)W&j1D*zeynLVgB`%|notfw664L`CKJq1e~ z5H+~6CpMf|>54SEswXf^t(TZ7oW#{V59yYkT54jY=$CIOBEM z%P)ufhv0l{eYB8i#81>vnR%I1{>i zGRoW((PP(4Rq%?r*FL;whXbG@)dC;|YjhW$BR$Ko91Jp!J~^41M{qk9%QlM`;i4UP zkh%^^lXt?;A-5XcvHuMvSF>qvvNVmFx6x)2aCiZ!R%F33>$S0$eBS2#?{5!7Cd@}& z=_Ws7u@xchTrTzqmQAN^A?E^5$1KDyan8=Oaf?-BNo1u$nz2h!R(x%!EibtxU-19! 
zIlJsb6CrTWu2f*o$?yr5c+sv#nc^-@aXb`L>snq7-E)cCu2&6(W5PT(%5hV)_EWfncb?&6v0vNH64uD6=l!ZynZ-&d$g!4q+>A6`|bCJcU|H*c4` z*Bfeqo~j_Vw$X=VKR-o8-ss(vsA7k0N?=+@(Vk z{UwYdZed;TcTfGX%QNMjRihl%zZ8ugYMcfI>FZg9+=%FszcGAIWYcr>W=X zotcDcq88{D1joUcj6Ox!>PAWQHx*?%lBjgJwBOj_T-&}uOcjn~w>X;-8-ad~#?~%N zyIiiY9$47{AFKKl-Q$t+k0+$hfO<|;72HktBeVo3t9^TDi|VY-bQ}U+*6)5}Gu5dr zCO_bh&$)Nx%eL7(9@|=N&Szfh?E`<+Y~By_kN&=$i=kywfn@-^-}|-@Wy*uCb@_&M zS**qLN>I@;5TsqI;FH_IGG@q@a=FM~jMtz;0Y?0}UCH&A9pQvs&BNmM19mNcV-f&v zwd>hY@sqkyP4p8uJm=>8Y~0Ltl8dmMw-VNot^-A1aQkn;>Ni`m!#lYRBrvzTerx*3 zOM5RN-7k{R?)xM*5%kp_I8?_Q?dVwH71#9!uGkB? z-!m>9@OtkXB`l`p8=^PKrR0wIE=aziH!vV!0H^g|+nAGyQ-pjRkQIunvA5jo$Dytf zN(|Yi-Z&0y6%JN)+w8z&Rv><3OMdPf94~R8w zV%}~oQFa>S&RbVf|F!x!C^AGFv}Fwryo8j*mbCWXf}+Rwax**f`_kF(-exMYq3iVT zMUGO-QGfeIQ7vk!{R#U%EI!3J3}!)JSC4dDHWC}f8H6*b=Q0*+8`xc&h+g&lY0nj@ zeX<(q<05#L`^=O>0{%wHTtTW-pz>QjZAW~Z;gLd`9<6G8%xZXegJZqrJ$wA=C+&EA zmR%icSB~S1orp5Ww4pT$;7P_tB&})RSM*Qrv_6x)loQ~xq*(_OI( z`tq(_jxx~|TINrGEd=jxx1$7Effqa$u6+E9cQ5OM4K0)Y(o^pFmd`?$eJ zU77|@HuNR01*>5j`wGr$-f4|qUhLorqtX}W1q*51Z8rCn11){lmV~W@n;){R-UBN8 zroQk05JorAmLsp_|5|fj!K+wtk;$PYK$if>jbUc3H75EVh{O3?(Rzw@(iSUUo;}?o zUN`q)(dj&6?a^|Xn;%PBqO9b^cx8L7Gr?DkGv3QuUtnlN^Ti#JOvecr#@+G(SK7?M zgd7t}8;K<(%R2;k-PdNUUbBhpyjw1$LpB*bDS{8&r#MlY$|HhpG0zL4ne4i$7qN`* zRL3hn`iUKlVPyLKfgMYz*!_38LYU;+@tj(pIO`Mn>uF8wq=Pkt&)sx174rov3=a9) zW~%N!g)Q^fYSxkY!W`wRvCo27LPtd~^SgH~r@vEbnCrff zkSLGYjocPwwFI}b+&RN1?{!^jFEb~iyJ zo-_)>d%45pxGhm}?q@&5V%cR6@)sv=x7n`5C{h!aebB#UF+>Ti*p>W?#NQ%`-?f!d zrr-=9f+dv4wrZCn6SqgvJ99oZVD+-v+Wi?eV9l;_?MM4Ajh+C(a^YibM4q?#UH(`~ihs`k z-aCv%??SXkPReP8Pvw1B%+mm~fv?J?ob?WfZd>x>{`BK^c~|EAkqtIpjWWy!tz4

_gqpVyRK>w2W@>*UqUTcC`u{Sd@sq4FfZSRvQtN0s)I5(Dc3(kX$QiSs%*$S2Z9BzBnkJ5_&QabN)qP3GMwBZ&`NCBERi-K+NF zBNTl1{aB86y>1U;@H5CcxvwSgm`Kx)&H598D3-FL1J8uFBECp@C4mxou4B$C`^yF$ zWEE|d|J|)1GpqaK4&jj{$B#ArNu^-e!Ia6=W^4O{4uRCWXzThN4Z1W#Xz+*HyY>AA zVf}*++u-9soj-03(QlC|G8>|q*v2SRPYB1ceE85B`zur-DsGc^Hco)Js4P~-^#dQ; zmSr6%aR6yYY-^OsiJZwJvH!}N`eVcKhm`fv=m7eS=9rZny2!U)b)^{iA{VfybNE62 zeb&|=2nCI?wR_V6$W??b9oZDIp$-~!vvqoPg3gf*u$KEJ-j!`&KHSpdgE6hEtz`G0H#1q*%%KKn|T{#ZMqL(S6vb^YaNl137 zKY4e8TFRzxDH?LFxvQX2F;~mh6B*Dl$@!JM7mqz^SM%?kgud>tL=dB(B-w&q&ykQB zfoaLp?2{-2HU7(aTZG9F+f-oaaT3RP(QapzF=^54j*s{4VUW$;>>P)uFcJ;y+-@L2Dzax2yjI1ZA8@U*Rq?gVTsAhFyi{#a<;@e z8ymOw{E5j2Mv5Ii0TL}K#&Bo;+PmY~$F1huFggJH-0wZhd4o{qXdMxG5Bks^gJ{H? zA{3SbQaw8o>tz!IDONAhiL}L&0V&~@iXA`sgKVl=Dwj`uEJG2v0BLd*f$eba@wR>> zVnk*?;-heIKVXlKMwdn6$4bix|CrYxkBFRQjt`^)0)gr-No7yuYRI~nK$Sk@nh1;O zT-a@(p_vL?od;Qyd}^~%R^s24xATU5=(SY-jAIxA^l8yA@yM)GfrX zyvMhF9lYZHFCrBB5H()?w#KTo&Wt>?e586Fnu}2^Msol@X@#YhKfFu2$J&1#; z&~&!juL4B#irsO{l!H$<7j=15d1zqf{L)plS6SH;~0 z=aodyjX4q2M3FaO)9!*FM~3%vjyG2@DX0b_J~n2HS6G_^nO7O}oP= z-AV8gu#04ezIH_4v6gJ%?z05bk1-UkT7j@+N8GkM?#~!nOa$I_YmZum&SZB)ZFWQ% z^M|q!Bm0hZ?shC#NG?{c1nITz%2W;2+6sj|TYV;xo-+ za8QT|P4X){R!vzs{Z8YHcg~JSnV5?+pd*QzPE_@zlNTZRg`G?gQK+HX0`cTd%BXxM zmz?}AgN4AE&5~{`#gKaR+FaFZy3Zrf$TPoB)tM*-}JQ~7%Ab&Zh7^d{!tM0?W#Z0LozMdJKo6!d`gaONWkyrS$OQH zb}x2L{hwv$U>WoXSN!Vz1ZEr9;=%6VCB{njC)*RZh(`=T7{Q$OIFV9f%B|d!WW!el zHIt^#A_bHPorlS9R!42(U2`cUOhHn!r=YGETc5PGdkO~Gc>tL(Y1^an8N?$} zI(@|5Tfe8k2B%+`+6~@-F=V&UuqQCzaJNiPB_rIpr|jcv8?7+`Wd}t;$!KL$G#6v} zfo;x-Dmtl{x-HR!1?EL@M7TBP=8Q4dRCIDdkxlXC3=D__3TNR}i;225`!K^`H6F8; zgc9DohNy@0(dq+1R-jBqx=#=$YfET^tCX0c-IrZzwF&D8(8&5$XnD5OIv;|otq{pz zwd}dLFS{R5|DM2Kz3CY=SdAr%F(}%RtWr>~6p?jb_yMHl3MDv^E$}aZdPJq|b6lE+ zJ>G9q**?5hkO3$3V`Ppxvd8gYpb*(CIsPtaXU>jRvm@v%OZh2?iUU;NZ&9Xchls@q&PsiRRu z)8yZt#L)48oy#=_NxJxc(H5di7A!Lb)6dwVH^LH8d{0gX%H#tk`XOedOHrn?BT{Ms zp5t<^(d**_cEuqb$5r}jmNyZG>{`qm>RQYehpn)Jh+VGdp6nzXZ7*EVjhx6cV}qo{ zy16H1785y7T;fSC?L5`>NV60HYCte?Q^inH`81J=h z7SGTJ!mkwwc?~KbW#ebVUWN 
zIockKPDL6k&DY08O!yH`v@5hma=lQBVwC4yA~N9Vo5&?9s(EY*5MxdbmKO?RwKM$h z-C>*Z0>VSeEF0eAurH#5wb_yU81_zfG*(rfA&Yh_e(C%mCH96()5!cz45p}shmfE- z@dlk740MVFW@v~!e>NS94ka*?;1;DXYHd!?Y_;g;i-eLW7&ezcVb)G6A`E4635e&0 zayyzR`I@|bB{yA2(3-|=aj-mdLj!gxp(#eO_~hG?*ZcOLLmpqwVTu-=g}D-UMmd8~ zzCfNy$t}nCtFcEe;w~yk?x09MrQSJh*AhS?x)0j*=qJzOqNt?uEZ)e|E>E(X)fH9X z_u?msLyj_4-|}+=Mj0;tbhi^y*@BB?z2l4eXyec9Zmfo1aW@jYmn#HXRQ_N7wfloX z$UHtGF6cp3<$M@apE29(IHSPI@vcHu*^0dd(`c93j+J`@GwAX_o2=R!L=kQ-7|{^e ze{YPX#73jG##`&cz_PFPUuw&lSeJb%(g%8m^#OxtnN&7c3BJX3ZSYPX5B8;DuOmrO zan2qr*v2S>h8L1t`UE1Wv8qS@JkZ?*kvdT()~?9V5k_uyK)Qx{Sz`{vmc0el@CFhC zH-YRXg#>R{)82wex_c43#N4EzB@Dw}$A@AHAd(7cDN9f}Yf@sZxiC07aW~rXCpt{f zUwia*7OLz$>&QXT8bfJVrw`iP3tn;9U}NA$1>nX&P-AUe~ceNU-%FL-M_Elese(a40G@@(#ZPHkEB3Cvv_W&RHO> z#C0;i{Sr$RJGwWBQppD8jY(8FpVqD#pFj;%b6vUT#2V{7t%g$xPB5c>4e8(32 z^P`QMY%xC5M|mv?KWUd@)(SuIl`TaLNZukFA_Rn8_L)4j!>;_Gy$FB2CHIDu=oz`@ zuryLrUGu$_hP!Up6DZd83+|F^+iy5fO-SZ$27D+kZ#gXTA;xg(cHYiyJUjC2xs!`l z7qYsWXRyX|an|ltqj-{ zN;0lSFgyge)tE9*EFOT?FcddrC)3?njVg()URw#J6e_hb$01JKZ){W4bIGCf6~%`Z zDsFQ%1Zak~IAz!thiYkh8-|Lc9*Zh$EjH&O;4WH|cfT~fzn3?xIcpRE#3d0{&O1@` z6LD*;xw^`w#T!deTY#uERQMtJTBF*Yw~p)&a#P;@(y?~>TK_Qt&T3v287B5F{x_PH zx(#}j-l1_Dsd{i#XECbt&p#zS(?l+`)Ky4%CUeH5e}a{m%GQvqx>oW|J3Qp?@LoIO zf9>3EN28uUp)$&ww`18BXI_|5{cFd4IFF{xKA))CNt#kr;C6B-k!|tXf?&D8G5WOF zX~?2sI?meOE}O}<9Fbb6*hJ6f1mxvPbnN`OUp81aALB{Ntc_Q|w{yO5B_YznP&|2< zZ1wBlFXkkmZ9W1F<(axP^!-m|Ayv#e|406{rR=RdWS-&ZvTuH0o#^Pjc4a6Ct$r9% zcxMm@BGiq=6_`7|9{>8udjgJ37nyn+J4ZMrVmPoeJ>7;$X>{f@8nu_KWmUf6CaQ` zm>9Rf2-*FFdaYm3?-E7*ymYqj>A9B=Gt>+oN@hKF~-0d)FQ zJ09tH#A-P=0D0f~z0+n^XRv}$SydI-;XNCP#V-JXi;056+Df5c&usz!qnP0Yr03a$ z55Vi!HHx8R^jZ?D&;wMsP5HjylZAYSBXBrClAvMHj%52H3_UuWN{5l&t`acdSk#s{ zdBKin?LEpIbYi%?LoL6ulUXja0-Mg|6=BHSn2F7J-KU?l+59S_DQW*93g#-fWil`I z&ncVt7UTVPE@uFrElv1>A4T{ah_1y1d3#Cy!G2xJqxQ!Qwv=B73iSs@z8pg#rIDpC z&lOET*zhtJhVC$>S#U^@)uni*x3t<#M2+{a1Pd@4K6=8Ja zh+`p_1eDyMTm+>yX!KzffVJB;B@D37wmEzY>w6Bhin}qr5`p 
zrBlRbR>Nb^~77zWxmfB%ofRS3C9f{8p28is(WMg-%}kHkeBB@79iez8?K=N*#fSv>VZHg<-zXpCK^ zyy*3I5?UL<%OzjGp4u?mvgA-GECLT55(YN(a;zCo$CUt5mPREkDah<yCz8 zi7WX&j$g0mMebL+)b;rj^Iv-2@OrtakN))M*6@0wjE7+vWmI?GHaeuz-hv$)vtB_3 zC9@8bR)+UYF}#)q%ML_5pV{Ud4@Mjuv*q=Ix8#IoTYVyrzi3T90J!ZO2evt&kM)k@laU{H2MU|? z#ed|HjW(4#j^P7 zS{EU2N>1LPtCSDyzAwvAFUBqpa>tn${-mQxYJ6Uf-B)0s6{CK2-Cn0)Hm0W_ zsVhfg??}L*NZ)1eR*e=6vbV)nM`sdg>vo6%Uo+~bMGqFPGcu9^og!!jc%)XgHrsGc zls;A`XkE04$5^zPYO4(x+FXQ0*${0cZ}G58EDuOo2p@c64G!h2Kemnj zSKE`;IGSWnvuzp;BOx@^GJEbDS8$rm)$+)KLfIAHu`RJYexmd^$x1+SpsijTo=SXF zojh1b(0#O@{MeeaFHY~f;4>WOtR-8NAjtk8wnqO>LeM|+b`qj(wBUF*ue0{if^5NK z)=_o+A+kOs);!*wqvZ*}43?C8wW`95j)>ANT7Q(uQbRTiIH22Lu1F7Zve7{Gy?eWP z4eD8)ZJRHY%_lbD05M@SI5z3O3Jt$tQ`ISYm~Yh?iNm9T{E+61_Xs5)@xEo*T_>DM z(Y+k4+7qmSO_Z>%AW}Q#6K}-C>3CF1*H`EoVI*jc6WJy9QpIKyEORnv_e`rz$8^Hz z4MO=Hn~5?BOELb2t3z$)`}Y?OHs{mhZp!juel*mA#0Vv>1utfaW^^uBh88n{^yghFH2sXXxwtixg6sw=+Fn{tY5GzzKu)MPw?Dc^$9$S542tL zUUu~Ewd-Eo&uectvi{h}sNM8BkN)g2yXAGhJ;5&D9u54rkIZ0xiSJZX*EN+r`#W|w znj9lw!aeWl@BaA5c0aaR=kqIjkf#K@RBSwL%<+jXoutmtzqTUET#c!?nuds zz*da~eIg46kofAcU}d(0#n&FT{YCdQivMy#D z9|dk{7z?_j&>QJhhPRz<{J|G@t>?jnA8X8hTu52OBAcR2-t!-FFy9BqY>qPUgzhZr zqjR<;d#w_>$reOW_qXOW$B6&l6xE8ilWs6bMMS^_YtBv!E$n1^OVk!gu2&$k2r1l7 zF2$Gx0&V__3;zWs#_eN4;33K#GuIyLo*uk(YGZ*BkZ~c0 z?a!aTl%(7SV=(!2T(ps~IAgL`^t{jYn28RKoQBTB?@c7U*eW1vf4HciNY^AzOU1Qh|o~(MUJid7xr9+tZBR#MSH;$QHnx0`|-`i}S_FoR$od3;@-)HmH zid-xQRDn3o^W>aam9J9@}Ksd<7xs?RpNNP$jG&v4ZSIt{v~}g5C6nL&IR#TRCT_ zJfgea_I?J&y6jGV=_AY~ITd1tg`te4)aij=-McAk7rkOt&1{oMj|j3XnK8B_KkFXXcgvdLp`oS6OP*W(4jYLsH&Bhrr?8( z)ev$I(i=2qja9u*$`R+_+omY<$rM~;n|%gLN{vF!j4kzoL5p*pZH@X`ZbZo319WtC z$AZuxD8`rlYt6na&vJoU>M3#>#~46Nb!)w#)}|+{tzPi=HcpwfM`z3T({E_ULr{y2 zoZQwsAC?ImsMRm6mLT$UpJUZzJ=I^2SD~cH#o;1%xm*1INVF*I6us#5)T7J;fr+Pn z*CqnCg2;W3IC1ihm|OZ875dq9E5 z97fK}Iqu@|df;0EGNU!dk=++v_~w@$HsYof zb5~o6GIwiLe6kC6IhR2)RY)S(p{ivez(J$L^@`Q^UGkullIeYJ*L=}hcl^q(=Q5%G zCAtJ0QtrYI2rI0I^7?EW9yx&7G>$vBVl1KTZ(=`hSEtYetE1_TFL$E0%M=xGo0JSc 
zgskKrm!RPTyXOllb`l2QzHb@c%$w{%0*Ye0I2PD=ARa#d+*bH6v9uh8mE(?v)T#20 z3AL{pj{`A`gN&@*S9=rT39(Ai#A#jm4xF~N<3T5V`@h?|@wgF?`~x`dug`D9Jl`{8>aK(>Px6MafoUn-WU7$%#zZ?Bk3>oZFW1f;CCM zU|Yw78bBaN6q=^|NqVJG%!OiVNPj?qE&fY^u^{kTb0S^!;`iJZAf*|P%yill3QR(X z_D*b_tOQ$$X9ei-qIG(=*z(BaqE^l9qosEtzdmEh`=iWrWeeLZ+Mq*P(B8!#*~ob2 zp02n>e^|uTp^|WkO}&DrU!IzSHc{;zc8oTe&_PllmxWe2Wm5?cOYm8Ab2wLNfV@&L zJ~@FQr9L`39%_$baHpst?b!fki*CS>IpM-q>8$lFjXWH5aj1P8OewG6he+2&z&% zvjuM?3!7eBbXdw|VWdk@--#=iC@ekfgVR{@f7sw$Q3a|%yoXly zV}0T9-tiuWkRjOJoDJEe2YUD0z44$1Ug)hsHrL=-?#BdWAcbUrAdAE#%}JO1c|~Mr zwy!+DEjw(*zQ82ANQEwYvz7ZC-MECW*yGu%effTqZn?-0S7$9~6-W%=MvuI&+-L+4 zwsxOS=!cSfsd?7zE0|7dF&rG%`!66R!i{b4UqZ|fmJR+3pGu*QjZsrl(R0?AP!IKv zM_vREpQ;eSs;Ve_G9&zP!v*4t4HJB1JK8 z_F63g6O|=G29w%;pUU>_Ht5rRf&e+=?AeHac;-o~=faTLqD$T)6V(Jng6dq5Pus!7 zyqOA9KHKMI+jO|93=dFvBV4$08=jTH!qEg1dN|GjCNl4IreZ=x8 zQs&hd`7E-pdAsI){;E9c*JEuE4BL$g!1tk2Z)QjEpxDwCJJ(xLChau3H-x~a?6#Mc znEF^S4b`C|?&J=T08G}vclUk2>3 zm)pyykGRV#_B*O5!Z<+w8tavnQRW(@BKWmc`%?@~`beHTyj-)(>co zcnQr+HIUD-rGDes9h)IAk|*TeK}%CN4qU@w{%`Uohdoo&jsxAE*aQ%oQwNAjTSV`6rUtEqVW>Am4f~vIojKrYzq0-qGbttl{4)t*q{026oe^UNUj4rrM(=kPZkyE; z?uYDanb*8c_*hTqc#%dX;1-v7@mmNen6s&z3pOK?3~2cdM}K+(#kmk~p8g0%X-D=4 zrARU*@ZQn&4-zU}Nw&)-3i9cy6<%V+ zgd-oh!zmM#Oj}k?6vQCyw5{^mFr&jrxK>AP1*R!MUlCrfnFu09$DJ{W48y+&_H1o@ zah>`y^^C3a5n3c;TpxY9Z?`gcU<@}A2eeKU;xTK8Dq>Y7t(C>j#vHb|^k_LUgiE7$ z{2pdF2@a)nA#K?MGP_T$JqFY_wN*NkjwmDX3fm&F z{a4nRThZTD8?u^%M4%b;$^9{u$MA||0?1@Px50@DCFAx`LI;gxKV#$Fb*txek(f(< z{}otpqAJs7I=?mPTLO<$5Wv(#AmrX5{H}66Nz`_%*fq1UMHHM!O&)6 zbl8R2_$=#uoZMK|i#9h=F=TG?Qj|%)xKJ_BFy|%;hJNZ-wooneXFOcGUSg4CKd|U^ z2mx`a8snqlrh@;mE#>hU+jGD!`|(u#IJ@G_4i4MZTmW_n-*wbUFQ(;3TL}S%6?Qv!3bB+pBn3CqV$9o}+|>xIg8RC6 z!b*(Ty*xM|@I7`v*P39&dSI1v^?}#kt?(swATY~!Z^41<5jtfn4#d5F8t-_FZ4boZ z)XO3<1-hpLajrZ=GS9;5><5ZY;!1GsRm=(rYvLa#C;2IY(5Z2 z7(1$ACEea~ASos?oHLQF2MR7a^rH@IiuPI+$-ggxEt@NNU^nR#B&X}h#cw%K@Gspb^m^_p@k49fikmm#`|x~|O$H2N 
zRzygG@N6n}Kr;OEb~xrg&1-Wc=Z{4OI(>q#`>HQ!$E%}>Ud9EsFvY=&J=;Jfg!y5Vd2v&S0krti>^D2f|R9#pvmrt&SQfu`thE6E!%*Y?1T2YY&zy z*3*l8dtLSbayh3P#VfJ?V7a^a@Ysgf)o~Q3zXQoz;?~6tS8E(K-w@ z*B^6n?=^yq7uG}30tXKUvX@%V6ZVcA43xQDAvhST?)|c{;{>psh><;MuTy7}-UK^| z;a#Q4lsDOl!86H>4@VP6Ngh)9NOrnSnw_J*laD_7XLjrd2dvn#ga-3?E)mHO?SxMc zGm{ZJ8U2@RE}S?}o=tm?n5o&!!*yYsXMfPY-%??8GT}W0vzYVaT4wX!wzYRyUef0r z0uU7Py@fn43W(Zoi(Yf@a0v}|>EJ^l_2(RC+2s_#M9-qJSaR@2$ozFVz&iua`iWhs zfZ(Krd&||FALr#0oqd$i*Blfiv0$d_0T=ed`|L*aCmQ9V-K>7ifT(YIPck9u$11!K zjklwB7($WTgy|j*X$kJWB00G^6kE#u<>+>*Iz_M^PnnIC@Xiwu{V;X znJ->_I-)DyaDqW9r{CDhsz#)|!H-~7lvP%aaK5YG2x8;~QUMHf?IoM$8}YL8u9hUO zB3{>e=kQ3VsBB&S8W0@F+5ypYczKnTpIpaYje1krvL2Eto;o?o68Maim?sbM>Z$x z$a%pn$P7-Z)6O?s{UePv=sAoHCB%_{>+l;v=%KHZ zHuOlqR^A4nn&*x?<#E)}}0zJ=+f zSNa;Ha@EV0rg!v?jFSUX$w#hbz4BfPuCMHR1sBOpzUifB131N&bu}eW!{tP zjq4RCGEthCENK2OH`?S6>O#Fh>rO@4dk_xi+57XL$io30vS0M!E(g}X7wm{np$p@{ zqtVa}wG|T= zJQ&LRg`qE4rCLx#GIrVfc#=6?N%+M#a1j-kBkMP2!d`#L-FQZL$+tmZdG&rO2h`-z*)QP&`2=|#=A2a<_yK*L4m!MRpaaLe#5D& zUNyU$I|H7SbMU<%96$&xHrgX*-B0j25K?JM?)4yBBY778Xouoz5o^{~_z#6=++`~d zIYLp&WkvmZ7*%`Kp(LAx+4Zh|W2+C9E4U4cos?V(!?q@B>0$a>1Q!Ql?I9!OB4vRDLiWIng`QA3uY=WIju3*#tjI21ULfI~F}QQdebaG)3k^e&D0yH9R6D5(z> zRMXEYZBA&)S}1$KDcf?Wpe)gAh!<^ZwkD#HbfZnnrYYf-4sm9=#nHtTTay%+W(6fm5DK0K924iy~qt7bdu5NXS`e8=Lvl$iGgXsN#?{W~>BNd4jwU(1F;UC=%W|9?X!)(Qy@dT3-)osRCzFDrynCwhfz3I5bO`;-fGm)p} zZS|W4|Bx;Z-SV0@jmml5n~sD0u>^x)$+bR5am^CfMg)Yo z=ndI1fl>B=_^ctCYZKTqM7kGkV@_xUPjhR`8m@;Gb77m_416OC8BpBjsKbj}1lJ0jX}hS9!4ezEPfXyK}HwfMmLvy5M465-Nc zw80#;grO69@wMWlAP%y+Z{pH)H#GM|v`?D`q7vDpF94_gJ)5d34-DJkH$%%Kn>RQ} zWY8rU&5_)T9m7Z?jz*UPQ05zoX>crSv`9a2+;`^Nzu}?#$B3s+RI99POw!nku~NRE zlfEE35#Ux6#k`f}6^k>Iy><^^u`(MqLfppa=WQ;_3X=eJ%ricp@F_?w3opU+xrBgo zfEb)(s4=7gT6i-b3dIT115faTWf3YCeX*9NJKnHM@%LXZ9krzx#jkv7 z!IeCvT|)=w;POg;`eo+A)g@F|-}hIh+EfmA`fjA@Qza(k|liv?|I<1O)0y zPrmxsg$lqdw@?9VeqHX$(`Ixbd)`(1Nj_>m3{z&o%b5- z85+X|{W>Ki{avt(qqgzap-PD<;=V}0f%H-1V~a^+LkEugh)E8 z3$``NWRCfzUQ!TUlTT)8`d58&gleZmsre!@{vuS1PZT_W7TpG`B~`S( 
z*un2`;;|9$2fr=5!x>?Wf2~UhS$;Lj zTtHGHnAtT4YiW90i(PlH zL|M9G(h^0Pc3t90q8Zw~+(6{2&I{g;LtDOcHe`&T3-J|)P*JrXI1qiqcsJRU^B_|3 zoU`BYUrW>5CGuY}71x(@N3zc&@EmRu)R2Nc+hAe{dFDaTduz!e$8nyHZXb&Ws{*&jp|C>Guer_9$T zxHSba9kM^1a@<9Pn|0Z`d0d_tf0WbYbv$M36OyssUG~QT$r+-LoVE=WB=d!Y{D?F2 zC)xPP8nM6|a_+J^9jK2NT>r?FqhTk<^g=1Bk_>(~X7!q~)?ksR3-%KS^zm)~&>C|< zwnIpOe8u)t|NN^RO|~gM6Robql>Y>He(s;U4))sS0A4bU5ADyrGNz7^9ToGl zz+?A<5d(skrsC1rgTPPA-QnHr+-tA;UmIVuPXFuBpIUd$vt*{MIWVo@n!bb;$Q1wp zLdE){tlSZmkyqzbz6))5#D^ayYU^KTMJ(g+|FEOgN-67z1b(yjH@+$a3wz3r z<+yz?%l1!mji7Q|NArapPvlYjhy6S)*U$JW zRCyiW%dwygC0d4MwApCwu_%gIkW(sB`~9db%g+zW6G_TN@9|rVN%`NReU$6WF&6bS zA8iDMgxuKJ*r%~c5*y$;=c`WOo-RA@N4%-k76J;zygUzNpBLXVy(M4e&OD{~#fy3U z`Z4Dkv1 z-N-$F_LPX>Vf6Ta@3WQ=Phn1O24|<7>)nu#6*3(7~nU{ zzZVI>f6Wcs-gN+Xbm`Rho^OH!0fF; zX}q$*e&?;A0_q?K+G4S~wie)p}U8pL91Me&!;GUy&bKw3ciJ8yme``Y-kFzXk} zzv7XU={c8TUr*_ug{yWx0(pHgu4$PROs$q1(vR^!xsU7nSE_&0B@+cyh;+ z_6O04PZr@LoQcHWUqS6nL65KXs)SCmKdk!eC3Q@IEjt1hiH*Bxe-vP|wW8-;4g-4p z#MXN+kY)DAZw2cF>50WnG!eCUih(^2o|x`Gc`M#T!u1Z+*yV<03#EG?;`k9?4&K2= z+Zf-SqLGmM1N%u1RvU+mX!P2o8rWk$je%2B@GuC(rW~gT^_}DXc~vICx8_F#3h(XP zzu`gsb03F=Cqj*Fc`Jz012E699e){L&k$I2&bDS>ASH41L^QG|{P?`kY)$zUlK2As zy#pS2D*Nlh#1kcSbJQu0)Ig)mJip9+EL5y!rHfRoNQtZQv9;u}#NC0=%Fn)45m~+X zd=B*-u-2RdVyjPVN9-7x8QXH-hdBh#zvX0&G}Pt4)}FJ2m7lkl0yc>wuwZ3d-?0u~ zDMSWi_G-c*!NZVs`Y;kPbVom&M9_5Yt?&PUgYJ6prN0&I*!rr&;n}LJzgjlA^@$TX zW4m)Gi32bfsE*ph$j;?ivLRo6uJK5oiefk#yZ~&;7dBcA@QL^X*7sAZ=OPyTR5bqk zsss5mo2VxAF&xc7|Ftyz=$~zFw8?4#s+XRe%pIXfI>e)}}DQ}&OdEJG=v8MY&_gVW9AK^{;T(Z8;G6T&O%K0E50{`$D$ zLjO%(oR?p*V^x<5FD3J!ckQ1hpk>*X zv47#?W8n%Ze=_=a&DL4m$};!Gzs#mQh!?<-pK=+eqs*yz>9Nk(DKF#d9QbfF$dkIHPXhLDh+2}{!3(^vN0JZXd?kOV8&ulJGz%+{6kNj_pA+Z##PTu$^ zMns=x-(7vsL?wX7xvT-*N`SfZxxhnYOsl4Z;<~(Gv-VYDfRdMQ%oejp0vqE=C#~Xz zTxoG^62)JNMN`sBkt@m#@8$}>jS)Ql3@j{|aw$i^wQ{&;)V|N-S~P2ytG+6=CXk7w z{8#Zs8VGr5xWA^Hg093UIkMs;{Of9Nl4h3JWOlXMT=Z4oL!i;D!6pADYP&hDA~Kw^ 
zYq8|V!2bU>H-nevyj_nkvtW;Zm$M;qsj$wC9Jfey9rAylGbF$Z;hS#r!`ngFVE&2OcHJI%+Z95?6Snc~pd1)PlD(hA%KU(p14|M{BEGFPCxbT4gk(3hZ!k85FVk=A2_OMsv%%KY#oC-_xUuJ0KS1a;H^{ur0na zdgSacy$=#dio4qyeH0;3oV_P<)*}NW)>I9m7fjUntUcx3VUk5+&|C|Wjg6sgvtQ<6 zL$ix;moZx62)G$zp=^C;neI1`Q_tpjPhz0i>f=ca-HV?_38b?lV4$f21arPWu(n(Z z!XAv-^9cnpUrQ>;_8fDW1KR(^+S`D7S#A5jgNTTT=yoF{BD!9W$L%q%x;<{U$1C}g z+dOKbZg;!w_O@PK%a&`cvtE4LZt0krnVFfHc_UW7EMGEPnVBuk!qygvkcbG8h=_;? ziHL}Zi2M1@F~*~vd%5=y=eF6he$O+XImaAx%sI#V&=q==k@pgna{py02{O=@Sd^HP z@`8-J9+$B=hZ!@9Y8MaWFGb{At^X?CmTy^oZUG#TdKrbJWzN1?V>Le{t)^Uc&d1}- zMKR#M>Px-5;>uBPEskS?xEriB29ox6zqNaJN{Lc$9XSuOXM4JkuG}87)pXkOz@2lF zaAsVkE903Y(PMCU#OT^;9}j<}leQ)SZe*5gqwxXh7RrH1=ARscGy@6HOQmr&N|cz?@w`Sjd(YnVNs!*;PB zUPy%3)RnBvl#$OT=qpNML))F`Y3%6cM%f$7w;#pyaqIN~ac5c8u8O<5`43dIQ=&9k z=3X6=T!V*vY)m;Xw%TF;p}_kFJCgH9$dUF)d3yRBSASnL!j4w+I#i0=V|cS;ISh(j z-3^#5in!Wq$Me)ucodi3iR>IUyhMCJcm27f7>E2^fdf0`Ahi)O+hC`?7uj^0OHD_g zI7RQY!OkRBFe<_t5BN)Id7ziG4*BE-lkHqUse~IIvYLTi?T3%Bn?aA3H2Eyj}=cqB zcLn4JVCMG}GL=?RsUoK2Sc+pNS=6*@_Apowxyai6z@}GF94>6Ll2pvdXO0mBnxi%| zdvr&Bo0ThHQXn(^IS;2YPoWV-)?pyoCB+Q%+1y`&(EAT%&U7uryr528B#=fHBxa{J z$ukGbiY<(;B^&H561$Ee4BT}61{eB$xlUtQnfn8HH9i_6EJ|P-R(jT^#fiuIHrS^J za=tnd0s3wHhia>5MISQ&Q*ZT-0|l-Y+fHLlNglB4fJIV_@9rkwogCA)G#ZtXnev8{ zlC;PrE%~Oz;<=&1CA-$dm6WNj#|&a^ufQr-rEW6oaH6ScB^_l~yeBkODK9yMPTBID zeYrKbdsgNHQY)3(cdH$NurAQ$8t?Ai^{_%~6Hkav?e>swGTcWC1HLZS4W~LZfa_zC zt~5_xsYu!Ia1m5yPqB^uUFgI&<<6ap4=i@An1GxB*cD>YY)cHZ2M*%2ZFSsI0Nl1k zV;zhK$^uZ1)yG*d-*#lrbxoztbmsVksv14 zS?-%Xxe>VLC1~%?6_Hc}@w^40?5!pNv(0zX`UJacwE?5)%>JhD8$473IhE)0GD?0q+N0(|!tevQy! 
z`?i8pY9C-FAnv>BnG8L$#=T+R$1^3b-S4YsKlqAa(->nv#`~h&i{} zRS9s_6@1ce_ED($d9U3P??v6hW8rHg>)923I)TL7-y6kaCbX$E^>w z&14e)xXTQSIo<&s=$T3BnF;HIX~lOy336>>jE-+EEXyRH$6tT*Z+2JAgT=!V>>U3T zpNIQ(P-h`~u0z6~o`4fnw#R0y55}&oRw9RL9?#6yD^{$qSuvGd$`eIaE$1Nw@ruo< z8gtRkP3j`gvKD#X`f>+Oqbywj>D@?rd(#$1*Brw|gDgV0!Zd34`5gb{Uw&ivSJWal zprl~a)dMOQP<}xBnENAyU6q9z}j#6Mgj`DTc**IHYja7<;%B#UPRL@>3*n%gtjRB!{LceW_ z7W+AI_kd5Tf7s@DCKI!`>9@J$xA;KKRF2x3(9LdG*<9CV+mZqr#GY(-dc<>%{ky~I zuYRTNj6N0gw3mfP-D0~EIwBPC!bA)`WzSdb-r`arLj?E03(;jA9GvxJ15rO89U))y zZU>IATHBkbg148D^DxS)a@$yMe3%KL#ONLHsWeKtA#25+T? zs@FKCGxkXif<7m;fu03TM9SDvr$SYdvt#}@Di_!wp92R8Jm-o02E74teSa+CsNdVE z7@>rhaoMR^2~jdXBtVh zq8{MK6>zSYQ0`*CMlR%(S+U|N05@_mo~6c=dRF$y`n0N$4x=bNKh2>B_Hgh@-S?7x z7UMW?qnlAzi!uOyzTOoKE~z9NSxGH%7=2NVtYE;BY{R~cXK{AIH@;$DRqar77jEoT z`?{K=>fT4<;bK_76s>h1X-6H0E&3+U8u@*ZQ9(Wh4fd_?m?_ijJ71V^6r*TTE=(s% zcl#kvJIaRsm{*lN1mwl_lM~c$M*hWqUVrv~=IjE(lhQqV&S?B=*VWlC)fAMcmZJe{ ztv%wZ9n>~}c-14ptdQtfAt+ZrQWzP0X*%tiN1XE!C$4&3CBCsJ-L8Ek7D}-U?I=&} zx<^7;f}bur-RpgjHfaDm?S>c5=>qNMN1P)QXCcW!nPzSY z7({!D)97PYI82$f2xcZT1iI4c zkHk%<(Ed&-@BF5l@kqIa=XbY<53Fo>Gjk4&H4{n2?;_D9=nLTFWcP2 z@;ox5%*%x(KTL68Ti{;|^wcUDx3uYKc%m0k?=ctu?7mo@hK6>#zZ&IWIZID+aW9Hz zMdQUcN_xKdku=5N_2E^y9;iO$3_|AIgO0S;r!asA2|R!OBdNJMiGJHvt%U2ARVHB4 z8iVZXF}F4)E@m{ef|{ij4B`OGfn+W5tdwJq^=kDl271PgvGzxbBy!KxyR9Q(gA-A3 z3`xeUD>u2DFm3rGVW=%jRWb>Q99IU=t}SM)qE?c2&xO_1K1FH&*zs$8{CA(ie7QCk z1d&<{l|7V%Q)C`ZLf#fqBd$wyF6&kHo7|B5y?D(yQ{_Q08+{TSmap^w zleVcEvje>ickE_ILM`suTf9fispOhYO3kEuw#`w5y$EF>GQO;9+XHO{VVpgx!@eUI zMyxi~GrUvOqz$$!!9<_1x6Dk|A6c58ukgPY-x5!-_d+~V@-sQ)C`P#_o^hBJfl~UM z-3e-OB9($+Z%°(|DldSj=EyMDzEKOIfhY8k#CZ)IkD8b?e^W=_) z?c;bxVRSlFUgA@BAv!@efcw3eUzJoTD3)3GPhyifr=hD(xyz&$bNcx-dXSFmpnc{& z$ac>XH*lvVj!tZh`o}(t9_X#=3LcrbcBzF|NOcF3cs+BF)sP* zQKQG(H#wrRA$2s6@r9(A+0`-@?7NBtmS9C8=sH)YzJtt%KO`gt%i8j9+K-8UaPI0j zEQLjl{8LVay;*z)s4eT)MFeYNJ`~IJOY|y_j6G~B1@zm7;6aEu@tfwV4TZD8mLnJ8;ggdetRHWWIaTVvO5$aDF!_n;&J?P0P;+jSe91u3=DSP>Et+x6KkDQa5mh7G~! 
z;4@0yv>W3CS5YHw;U;=hJabk|^hRx!4Izul)J&!u)~(KN$&b;M$-`Jx*#>P0hD=d? zcoN+jZIs3m4nY6fZ8-o}_33iP@3GrA1XtD8S!Z`_2+jwq5JJEz8ysKs4iG&I0#DS?xVS`tZ zT1~rRtdDeWW224R;5=n72d8$S+G6YO{)OZ&E}W)bbrsFEKW&1`=A3EBEJn|7Z&X&4?U0 z2;7&jqL5OmWOR>qe~hN1`K)A>7E5MY8|(w=Kt9?RN$iln4uQ}!in{QQe%$eypJejLT$3l)0h z$EwQ(N8$vbv7v!f<-4o->zdq~LxOP;*5wrZy@_)67)kxfO|A1-f^dWbyqA{)I&e7vy`R{hi|}se^W38`1@qt* zKZq{HE$#zM$;cmG?gQm%y?qqGD!(i4!|_Whls$H#ns|*}+>mlmRR-;oid-)T)yblX zUk|6m3H!9V^omI6@qM2ql+Mz!`#ujS_n~$<#fc{S(iat)ixp987sUHz5{bg~@rx1!hp1-<=Hp%LP=AEO201Z!Rx0sAQztE-zJtDmEVcxB;B9QQBrj8OMN&=9HF zqlJwjdfKjfG?)`MqVh_Rxx%h~)bCTd21^%sgnj3m{F->$2^AsZGufjCR{$m=MFuYa_#7o9}Lb=eJ%h6)KR@3k8rO`jYu@^n*l!iiFO;X00P z_U{z#K{>ly9t{R=c>~6DakhgVO%E9M|BB+1cdJi-Hsa}RF@4t!} z4PPoQ18Icj=A&9UH^TXL+Pp*tmqE8Ja7s`T=Y_uNi`(i8!?t55cxf|SHk+E{q zpvWki=%V29);M!Fs2)SHKrRlPO1rk(9*F+RCSq@s)6r@ZHqxKq(I~dOzMA5zvNy4N zUbM!XVkhikQ}n8YizjSpPKEFVqONjXTcstFKSirQ0sAM}+Enkc_834A;20qtrFFzJ z-vO@b>#EjJ1t&AsM5oE}Y+d3qin-2KIuciq_-(aAi|Pc9aELD|)UfUdVaDhqu z=vzUDTM}=rDj>5p2cC}#s#)ydHXj&g-nVRf^o-I-3^u6Gy zl%26Q+C_V9SA6FlB7S&2?=46_tvIL`Dwt(<5!3&w^<*C$y#2P@;h5Wm)8*b=5ky?f zR~Sg8PCbQEq1X3~`*Pr;Ts71=dxh4`4&|wU)|y!!e$?$63VkhXRdL2pH6*A5AwWi? 
zm+VM2e|+Ph7-fm+bGXPRx5|!I?~j!0<1qKsv3MqI^_JfH?Rd;g?uM`0iQL>oFPCgv z{dwHi<3&lZ&M6;)olm?~E>QRvuCjN00A=`-4j#o}r=7{|#BqVkk<6b16>NfCdiT?I z){%hbW4SiXxx|TFq#Eo!KaJ|^?EM^+&Dm1&z@2~8rF>yWZaG!-I9QS1>w_2#&s@$2 z-MxMGp-%%-0iHVkH@e&f_Hkl}R`Mcx1J3$Fj@YJ%5N&{q(Mk?X?%hqkyF3o+xtu~r5DqelPMzVHQ5)?}1wU&fd6UK0bN^lV=xP(>pzmpEd>y@Pgc ztxHK*;wqT)+i)pf5c!+z3{{NGDawicZ7zp2OvJOGZ~4xfk(Zv-D50hO-us`{P-j2* zpKY~E?Z?a1P^_0f1@ZLa8~wWd9Dw*CR`0j$|CIVib`>K>HJ-fUyxbX3Ohs*Q1@ zbT*)J;_knCV_`qaMP}D*tn5S_jbfno*|qUZhBx>i*z!GgT|5gHkzz-_eq-`WZ|TZI z^r0kQ*_=02y;2zmva7L?J}neUDi%6wV_`6+ zF0?Tli%zS)tqyZxYgY+Xv9TM&wm@(zHB04TA=|<@?_;0`kC@kOyw3;b0WM^M|8V2z z#BA51(iD>t$1;18k&RqAcLk26p*vzza#E;Xk}jcBJMwq z4lfU33xYJ1Uc-fI*BTwSg^pxwa&=91U(!iK>k_KG)Z6_@Qn=mzC*&#rODCyC71guC z7SD$tua4Epg6>*yOD01s)601foN9V zoaTU7nyuk~l!irl&{}-r13iEFi?#YNe;sV?{&>+ru<>DKIu9#Gaudx4CO-?D=` z*>l^kbfj?RW71ijGwocptQI}tn)qC2mtwl{Y_hc*Lw#JVQ+}#>1v}R+Jam~}3u_E& z*ZGKaaEz$#_0g%sHEH}+0ANE7Tua_>Zl8_0i10x#a!MSPnv!_rW46g>hhR#&>+Ff5 z+QlCp;eAVjhngI9sp}^2#z~xRjc+6gp?)eV_-#2l3E=7wf3{cC)^(%!oHuPpeg`ju zz=$7gzwL}?)Zj(6;N`G(`3X)EOncrzz4t**j2EiUbsgzCK4Crno+F~(c31DEKUDzq zY1`}X$vd>fdgFcXz|ns^#PD+OJ5YV2ENOf}q{E>=T#;opj$C9B3rKhA#M0SnN2>26 zGP$my&&OKD!rReA2Qf5Iyd8@^eXZR;*>}eActdhjZRmeo7M@PT7+zz`Pu2e13#E>W zok~I}p%0x>be@j>%Cay^1rnB>Xzw_gqC??CJ5x=7l$GxNWCQU`7oB3Nl<47XK0^hq zaNSohte>mCJ;XW|Z%KQvdWPeG5D72zeqI3>_g(FJK3btJmn?;9d9QttQ);Br0g+&N zsK68!lj=u7`Mqa|RJMUoH8G`K-|G3;2C+*YdN?ss+ z_E}=S>_W&NCaxIQ?ehea!$rEl6R?qARKvY0mWSO;c)AnnsL{W(uW}%Ya@N?_xd_}R z5ds)I|3Abzr#dDU%`HG{t%ra6_aw<8uwef$;W`T=yszZ-Z@+ ze}dNcO}q86!u)YrL1wo-<_w%~NG-AWZAoOe`_N|j=mR}dTe@p)ST!G|-jvd#&xXe{eicK%%x@#AXD6{hlU5CpV&r4NvDT11xE_XQ zluuyfVxsBB_}>)OS!HA6V=q7|+Sp^`5{wQ-501|kVq%$_j=Kr*%*{%u@wm$-1|~e7 zKr&!f5}R}xM`a?FcGm9tFMTW1$dt!iMkW#W<^S0bZqPv4oX3K*oPn2qi_+1~^|$Y?$1iPOfF|9(u=+L! 
z(SpQk9jWhR;aHf|gLbf?#qRsnMBQgu=l)^4KgW{9S-eaZRsAbRi&`DG#ZEGR|J$AR zz+=JAqxF@uC`(nFGhNb+T;zGn>mLg#V6qonZ}b^-C?{uALaUH@soR#uklHT2W$z!f zmc->W>WsGr{LXQS804~}bjIzEg}qp=gUH#1iSF>h>PQr8UGb@q9ls#_K+eq;5b2x?6qqX%d=Id%pQtqDCdhTii%@gSABfB`l%~< z*C!&;E0MXe1s-UFqj@yP?Z(6@<~|aXmuYQ^{!4;3&XG4!UTw}##@d!dc|+@JDka<2 zm>+4_adq34hZNdR%%L3oVtn65H=_I)2W@+FuKl5Cc)7>zh-XDiWae<+wlkmcm0aRz zZ@ZF+rjjR^l#!KWWz7X?wGKP8G3BvNQJj zV%#m~;RDszCt&xK=Kd8s6dRN13q(x3{aTkmm4~aAgf<3acqDt5EUXnm#_G#r}{Mew;KDKhhsSVWTTd-0VN zo>G{;CblJ>=~CE7?|snG`Rs{+S45`|@@H+Es`q`E@X%In5v7WOH|gkh;`}(8lpCK( z1GrttJ%#hzZo8P+nhd}ANzlY@8lJLGD=4LHkYLkqpZTgccTn2n^CVssIbi%!^`t2C zOYJZFBEFZelk^m-tMp~!6akw`bM}=Zhii_ED=Z%*@^!+D%7fwog&Zz9luEJNVc#Uk zMCaSL*&2}FE;96f`!1%jpX>f@`~LEfgrP079~?z9xanHx?MMIlt{kEE{>6UE&L?-Z z*w4O?+bC)Ci%*=B8wO=l9HXe8CDLBC$vG5h@ly|ZwYMoAEL!qwHYKZYObVa)Lb^in ztsEY9-KN4Q;9EoU>-`6zJ`}OMVN+-&a97umQMdiZO@%S(RKoI`ynAUXm$li=`KyDY z-)@P)q+md7g9|-qgZx|kx>wn)n+mJ5c7)xwsqjA-bL{p6fbQ*%%M&avfC{T09M8mC z;KE)+ts#k+1=LF;RpQY6-AJu^MjGa$QX#X=h9^{7V4xBoGQ!~wUG2zC&cR(722e5iT*jmD4umvb_R?0hTWCFrYd}4%BI3hEkpTD5*CErX#uCl#V83Qeta@6 z*>rD8aVH{CmyrB=Mm|u!8QtPDeQo}Eh|Ss*SN!Z|G!mUSaNC@Oo2v~z8ouf!o4YBr zE%%lzHuD^`Qe6jUo&11v{0(-j!hII{nv`02_iYLuhv;U{D|vrDxkrH^vX?E&0pxv3 zHsky$b$Ww6kZ8lXj+`G>>%G7GFt=`f{QVRXbfZs{+NCQ|%+RnY2YPoWJK{O?B})_I z1ff%qv;tkh0HxfZwN}rLlqd?N(`?Uv>RL(-%uegbR>chOl`FcEx~X$#%X11-QABmv z${;%_FZXd9%OgPl5}Q}_@2e9o5)`bnHQD=_-CUf`UaXA;*H=V0BDd&3e}#OS4@GOw zf2OUt&(`^-8$ITCw!Z3&yM;kzn%|J0a7!G7Zpf5p8~vGfduyaZP&vJx9&JFCtmUTRvs zwkyyWT(!L4fL``Sb^5YB@9;JgaQH$JL&?;Kjitwj`upD(|IKz+K&p6-Jj>9pBRRRv@hvIu| zr#BUg+5dYypS=@ETKuMzRpjBkX5Acl7m9Q+{i}qO*VqU#oa0b z=i=m{%DZAgkPcZ9hmfH^rQVC+{#n0_+`!Sx$=}XxZU*!<}DLaAz zls$}GpZ2q3mXc-29JKZ?(xDium$Y;$DI%Iib`7dm+gjlCYu3LjcmYntqOev8)D3-L+p-DkHX;CGj#I)l73IHiUaGBDlxcww~gqZa>v zTXdai3kV;u+p}u`R6-zm-j!3z?sz=z*m^RNE2|;C$JyOfrA;03_}Rh5Vdl>NkN<3F zb_JtbkND5J4+~garO%NH*FQX=S4tp+?hzHb;A6zoAl!}2HaJJK3v5(^1k>DPWBd!h 
z3rNK%d(~J+(a4gq?4*tJ51agWje*o(j0j$=9{POIQ&8KhqM9PAp@nX`*x8a8$!FT{<9=mk(zZuypdK}`q9hkjfIH^PWl%CRbD&yRp2YEwIqhJ`>P%l_$MGU`eIS6 zh$g$o7RN8lGbse`gk&W-6bt#y|w$xyQeO&j$C)5cw43D>T-xgVx)#?DN^=wKO+Wu zR^Sf1(m$^4TxF}1SeGy)G_xjgP&V6@x7^y8XNfw8X5`EEP;|suy)HXytXYn$g{{v4 z8(Z3JLwxHOhOYeGwlSXR98|QS;-@z`u2yKF@nkOMh^n~S)%(_g&%EGEKT@u3)2sdnVw`@B!GZ6(H~7*QtA zm!J@igo24Ttj9ZP6y&?36EElHby4?vC!5Ma$AQUZ_)(mW4&=XsnDmczT*vQJkYH=@Bg)SBH*TD!`kcwvTi4DGew>8tg)hzMo1wqK9M9 z=EZo%btM~C;o3fl5#_1vClq|AQqtL{3E3Q7;h$Ah#+l9GPF-F8eTmci^9p7+nyZY1 zhpc|#^mXsNyY0&)3k1yl_LaXN@b8Nk*^kv^U22f5{L|xSC;T^jRR?QeJuVC|9c((g&_m+cFpGCJ-Be{?Apy`YX5Yn zUFXkPw%2j{xPG&EcDLH;o zrx(V>ruo|`Q*3&^g#{GEtd`B#96Cj6Impd$W^C{sF#xZ9L9G<@}vV}hLfu70i<0W=q(q~wxOXvV(b=2-pz)_9p zHao$wx5z(6?|>uOgSI&Ps%dF&s<8*+3%@Wp&|tZ0ec;J;Rv&Nu8h6)>MyC$UbuABC zQ_!34M`=>TZ(W%IXzAwgkdc!Q$=HsiNL*iyMZ^w`l&*F|(v5Tw$Y2|@J5GQ@Wr?J> z;tsS;)ymW_$HjZIgD~~p*|x>u?NpfV65CqAtMEpb;P!-`G|>C)`FIwqF0S5Q*j#uR>X9fdM10&+HB@R#9ki!ycg#=b zjw2kwr5;K-R`v$gxnH?2dvmr5Ho{vue^MwM3OTg7qy)MIEimED&HgdjkaXH z^*Py2Tx3VH)q`Z9IaVQ@(@hBHa+MRy5j!5>OQFosR#?mvd1}B}7s0T>`W>HWi(MK% zRn1yO+l%tsPAAS$A`sAO@AzDG1E9b><4ZXk(S0EM7pEn)#40=M>x1~(W#^)A_j@Xq zU3<^Ju0{HMKPM(};e7s3rKb;~lTJGP(Eo0eN$Mm2yQF*mIDsNS=C^jiNuiRH?P4J6 z3V~bH1r2`^P^I&b^8J09kd`={;^$$X`Cu^Ca4r>=h;e{BRhrf>982iwRJ!an`*QQy zY3@I{Y2d%auE?ohE{RLIVcV7VoLl3YoHBb`2farPOSt^j zr-FyrJo_%E$|0vKP*?Z&@k~zs1W6Mc`>OpA&s1myiwOZAC+x>;=VvvRw%6>ZiX!C? 
z=^jSzxBPP;u!n2&7oSiUX)ElB!dwv9L>-%}{0EoN64D1g;Z`dJ-@R?uJW*Jofu8A; z?OJb!c8pzqUB0+ys$Ks?VLF;P4Q_bC86A1J$j`^q!J=lc8=okw(X459lYh#M)M__B z5q2~fD|m((BbzR3JuG`Ac(K!lCs1C| z)kZu~mIjB^NX-v%7qimZ_k_lvt5H zo;H~;7vu{p$hOdX5ci<4_xl0{m*{osQR*R{c{&DJJ@(k5cqZH)1&;@~ zw--O*%)axtt{8Jgl*?ETp;>oryMl5kXj3u7!N*Q6j z_b{u5{70nBNi24EwAeZoSr%t4+NUl7jO2?(7a>Mo8Y9znfO+I=9}ODFtj$v%;+Wit>uFTlyC{ z>VnM!cIb%^(lUKjy&ZO>7HRSoJK~sFiY>6On)zO4PHY~OeKcksc@oa-E!Tvi?vD9P z{{zzsjx>Hq|F0A5M0TQ#5Zp#`{O(Vr)UnS_`4m*}T)9Y5&FO*Yx9wcg;B?MjdoMQusey^&e?NbAHK5h9 z^NCz(yX6G+fn%RAHkGq%un&D~N-&f%S@^2e*+)5FPZWU`ez$$>{ZjC-oZuI#$-t_Y z6Hii_#OjMqAk$^o`XncYY^nqw3J(7?J7==;eJ2m3edY@`8k+jN+CG&VSi#6){#D6{ zFJfRBID)>yLwBx@DxT}vVqw(@d<`#qTYm}e!F%{ zn7jT!28Shfo%bMZHp_H=Bu>CZ6-sdZ#f{Ng{G3uxC_Da54g+2f z-L4n<< zZwVfY98Rr>03-a1Vsuh+LOD{6j9$bENX3LdmW|4xNWNQ)hhw%B?(jE%|C^1i+Dog; z{lq>PmyiI1Oruz1Y`jAv)(cC>goKOjOd+f4mrjC7TjHXv$2kzaO`Y8p-|8+pW>dBl z{&n`_uD&Wnn2azNK)hfBmb?%=t_q38FaPS=qR06)uU& z4mKyIKtML`YwoE#cS|tXlQB%qiw4rbvc*ak^aYOJnq?Sj#7r(sYDLpbiL?7+8VWl) zZTCk9`V{?R0eX?+75{BpoH*o0le0TapFQ9JFdIsP)J6l5)nUj=iJR)LcqRr;cE?6X z7eS0Q)|4}tPk7AI00%A!2D;@k-67L9*LF(@)tV5)io9y=@k{koc3VdR2u;4s!uPQm zy7Cz(8BTv^j-v5hp2(g|+`2ruS7syBPs%i|h=tX$j#s!(gLL@;uZh-Jhl(_z+!t#D z5H!?S+8*)=a+Uwi)@_M9m17%?qZVO(bnSaz>>|OBVy$e0FW0^J-V_hJjWHDNxh0Q< za+7X~X9smgfl);#ZrQz&R?aZO&fS{8FWbUdaBaLV|isRgP{)+X>LTC0u&PX=^sD6bmdg7Uz zspP=C&vyH`*yKk~UM((i z8y(8;R-?aVN8(Mnp?GsS=C7~%M$f!tv6NVSb~NFqCbR;`_}7kYaV8(FI6kiauWl6% zF#T)C9n0j@Z^h&FMBw}Ybo`R_$4DN;THp$lp30G=pbm08ox}!1|4#nBY_^^8)rNmB z1#Jc@@HJ&OF}VKjY`iI4cB~+&8_+R#JmsbWw zcENkWd6hCAb}`p^MoD40%0BVaOEpeEP1FosWS{w0a%QHQ@#i^C9qFSGvA^(^pnSJ| z8Gj!@XZzJ|L+$O*N0>sAXVRk*&XV zh^YLn*x&CG8z{#W7p*P+y|+-B9&D~ZWFHt;lttV9^Ftowvq~UsCFl9SZkrKs4i z?+_@#hWk`GMqWm7G{QR`=s_AC`DChyIyV=ysVM5_JsAfwaO&C1+P%#B9c2CZ8$?v%W`r7Y;A%^FPIM5d=CXiy>>8yJRcVE zznpY$+q&GW@^pa0*FRYVf+cM04f(3RLE&k(F<>bE#7su&V;6Nc1*Q~O{t9oz!EVk8 z$YT7UZAoO}%c63i)XKK4)n|my(UpYTZMg&}587>e>}qvM7MhYrvX`TlRJ}M9OCXkL zxFh;eiXok6if-R|8DYh{gT>P>gYDRr2%C*`@O;iGY^#@Z489PZN@F5fUoqi5)g-(S 
zv)p%gS0r0f)kPvOcRG6&y~*Aui?A|hs`WZ*7fzmK2NF5|4%TbzkiX%wvBNn8@+YdD zq76Ob5Tgt@*lK;zK$}A;p%pW5G?#r^_(|Haoa6#hy&bQnLV;73N6FbvI9S7mTmO?` zv(*93&E|K+DPM<9Qe_~JpN<~(4eU|keDrlk?45Y#_FpM9_S>02C#NlEVM(}LuztV> ze!U*Bck<%bdYpYSIQ_^=#sPOOyB>fYfG`kV?LEiQXrg}KPiSPj1xL!}aNg&$0^jGP zAOd$VjzwGNZk?0(D3LP{z2?W&SaP;;@$5q4Q||NxQnc8`sv8w~7wuyv%SEh3Y4MvU?f@isU zYq^xV%MEqS*1Y|-=N0uqCNg?GZP#uMoe>G1$qs#8{+!y>zzK8xR#!qv%e2RC*m|Yy z3PBDMy3wD(3yH|3kbpnhTKFE0s7||i>)E^9KKb9WPTDO#$GHn_P{6bgZSg_7)qf52 zOi`V#+XBK;GlcUD-!i-1ThxHk_9nZdnvvqV2SqI%o3iyKVZ`I88WL!+<}d0!jK6Pf$EFmx3A(Fhz+d%+zo3 zf82QDn_dK!;`YeZG&i1!n3U{M32f(W4c>Jc`fJ6mEr>>Lg5aJ*=3eOVQ6sp)?yG>& zK_V<2u=}?Lzl{#_M*KpFWO>A72bf|w3MRawk{1`0 zCfZ%iQLBTvgaGAy-QEhwk`<-6RF%Yr)?4-K+Iar81Aa{Wao#LDlwB5|ZJqy;jFg1^ zup^3y$vQi-HREVKF`ITeE>nGpD?He0pMD5SCf{GrWfvGzrEwsGSKs<$@Azw9GD4eUe`l?8+{AF}5wIROMbP zNk$1ZPII7ql6^ghUDiHLcsbKNPF$)sad@94Z0H+gGLqQxd9>DH@JiVveWmpQ`yx?s zH}v4l=$AQ=GWgk7-W~RmHr3`So;_b*247l6nCCvbl+Q{evKR^6pmVv#{Oc(Dwt`Ey zr+DpG?7Mgt>~*Qe^L;|pDYl`pnQiui_k*7)HuxW%@Mn_@{-^ldFDdCSU$vioOE!o0 z<(Hfc*FlHbQ-$@xeH+f~s;A;W54#nYHq6meg+m$W8FHsx^HlKac$c;m>x5+bYoBsn zM}gO3VRbxR_f+sov*GVr?E0sYIXZ@t{)VUG)|%h$SpaW*%6Ts7AT#fUd#~ISA1eCd zwA~yd!HpG{r^Bq|E%8jcfl`r_XM>(9yxsgRlt)jM3;8nRc&FVKUmWn@Ccf*p+vAyh zIbLB7DCF&qfJ$p2Jpi>%d80Uz!8vB^4V7{j63=wn^pzcO$c83D;g0JXh`dk&h+)yK zlr$_mB8@B@vJnh-uu0xzBfPhP9yygYbF6dQjC5RJ4Mc-rqn-+`2D^uLW~t^a-9C`Y z4|>c~sYOG}iU>b8aa{PHA^~fyuzQSqD)_WcJlqLJZL{&YdXtw(W|$D4I#mfBsNsr> z-9#r20<;g>B!|8ms=6y+LpRNuXysic>498;12knRxtm)W$KT1S_ZQ`4O&iQ(hM znc>}%F~nwGCO^36E>a_iKxaKwSo$IVht2V|7L$^>Oy|a`kZz58PDk#`HZO^h^WdZ{ zsMb>Xq_9$Ol`PEp6SD-PabLBxC5i=t*>Cp;PK1qDK)){nZppT&LbQTKeA@23wmAB! 
z@Tl;vs&z4w;y9E3WVP8Ey_k;FlaOS60#?&q&pI{c&lPNDif`q_&LFqiQvXFvJ$5*2 ziFa7cIXYW?-Ei6&ZS7CF>av&X&GUx5!eOK!WQVh;jjY4hggo~IV|4{+zJnAWbl8^X znvw;#IOwd5nYkKOm!}ks@G-VJ!JgN)3aMa?1A{k4o2^Z%RSeRq7JDdrB}H-(&(@2#3dX2W#&dMIBg{kuAba#MJjNyM*7qvGLXYx>`q*l{MNQN z$7)z96B9MKa?dENyG|8-?Lf{5h6@85Z~fva%?$iM*LL8TF7ut5nCY1zUVpqHKnCK8qdNDO=>+mmQ*072sZe59tg|e=1(Lj^&|$d{^}GD)~Qo!BH=W=XM~yc0u`>P|aLYI-}9LktfQE1)mh;Y#pBzDxj^LH=x(WRwU6Tb zLm>Y+J656Kl5X}w;$lh*ZfzI6>F;6hK8db-fn9zfN_q0BzwZ2x7W>Spf@`H)>ZQ;9 z2<={KUu1{HU+K#nu(MsN8d`o8n33sTMp4Q^aTO>V{n!3w;g9#*rTBG#IG)$-n}9}# zB?%#lf6BM<4E-I~i5zVY*msGi`BUwC|C_6>&3>p5<<(tY=HUF%VeW$I@s_o#5I=wR zwWw)b(v1gcr~Tq{9y5BZ*|xBR>1eU5wmH)x*Ha7&#kJ(>cqVhW3{9xZ?V4>Vm6>stXjtjcoxGgUC zy0$jCdfv3n87SXaekZxx!_C=JKIkaeZ`l?K3M@I1A#2dK!YwJ$->r!igbNAN%F%S& zwqOqkc}Jz!T4T3Yt)ylx<;Cod#O}RKTug&~##kw^xkc+t?FaW@L;cx651eLA2Z@(B zaECc$nDDA1j>BVGNE*1Xx#yz?!;GjpEj|rp`9@Y?5pKb$tBBT7+k#{5YN%^!Et!R( ziZRizj!-$Gp&8^~Wv(2X)2M4>;l@ReI>9}g6m=@Rg=io5y9v?Le(c62Vpq~jv2joI zc0`X+?!qQTPn?UKjFLs)w!5Myq#e$|DOC%(_!n!iP1}|dYHAEiii_zv(Q@D6QNu)M zRH!OabTN@>d%k8fw*|Ld(y4Id&+^5o=?3X@;xj2+2=oVMaKHN7<|g4_Rc&ruW%FXt zl7yhJtw(G@wrQ^^x#|}tsmKzG#Shjg<|wb5gLZ%RAsYbWs07%eYAs}i5r6nHYaz?% z;@m}~i-L|GsAgS?W5_ILwcCpLvaE9nw^DtQHc3a|m^FN3&K=u^Dt%g0wqe&`R@LRQ zH2Rfbs6^&{=-OIxV#@!@mDn0>oc9%i$ZD9&M@rZO*0HT{q1~&k%h8KIpv#s!d;>k& zdMmG(sgz1O%ls@{?Oibq^g_0#!jb|pi@ws( zez8`zE&CuhcBgIkU(KzBb=lz~O}W>0W^YJHuX1d>uG|We!E9H&QQt1hkWjB=8G8O# zhZ@fxu(@tK$>Q8n7>g|F*qVH*b1A3%JJKm;!!3C>suVi*h2B zyA=lUPzBZ-gaOFz2-xLTIP7bTh8Hzb^dqDEQR_hRWK4tdayB}I~= zS`~Qhc#_(DkGDG!-QlOoj_D7;C4DON@#mMpv-u{Kz_w)D)^n#ai1lbok z&SWK*7_)|T_g$?X&22x$XMr{B^t(z-IxRkc353N` zu=_rn?lU9d@r*lai_OLw0j3kkbOc-{$4|VhL@v z1<}G&@hbg1c@lF1B3&>>(LK8_*J&!XwRD@_?}RZHW$&WgJoBjC)MbkuoYB7@Ki(e5 zS2gubFsHTu1s)1sT79mmvMkc3F{j9p!*4=Y@U?47phy4yccW}+1)zMU%EDSpqG?Lo z3Tv&#@dO~s#n2wl+gy1*NJ@pAId31USiYMRf|TR2V3t$u$-N?A-2 z7H-T@;M^sm5G`e!5dEr*DnVb%?wIkO#RPw|SGP-C!Je zw)@(W3(I!ojx9V0I&ItOv-bR3stC6$km#ILDI^9Gu|eDOx%v&uYH9dFj9hg09Aa3$ 
ztmo-U?e0i1!A!RfG5{`b49(E@kbO#_Mvd>N^#l6r$;7-!uN@Y-K|p@a4e@X=igl4sc~_ z!Z*gwCFHY|LFK*t#Zhz6-jDWPdrh7&hwXd-0*w_}QLg9ZTCfkQ-o?AhKJc1-n3y6$ zP`Xe86f+P6OZj2D=+9&X;Ba+y<0lnx(owiG->30R zC$+@XQ(T#!#WOhqc~+W3_Ia+Zl&Q@GjDFz=YwK!TX$g7Eco`qR%lBw~R927bdG@`(ALv=M z#(t=JzI>Cr5&p*{-g}lUv!9Z9$(Y;SZa@1Brco~wh<~XNAp50nB-qIbVCuifq(jI33!F)vj@_Y}m} z2=S?V*RFofvYt z{gpCCz}wIJo3|InWF8y+mhHj3)exN3WrJdzqCf{m>YLlGi5*nIT%8<7w{3T`9h43^ z-d@2=r8UNRg`IuJ_QDVh{`+5t+29=F-i{@lfkUgtQQbwu1vx?0fw~^VK>{rZHIaEgZ=t|Pm4d`R4Ch|ZjsnTrh z_QKG@pv%XV0;JJKM-_Z^v1pC2p2_1vTfEOE_)Hq8MzX{v<{HcN02U4x+pT;6Cpj(# zdM5qvc9(yGw{^{fHYH&}X&P1)o3=glaLRbh(aRD|4{RenVNaP6AITOjU5PG&nK8D9 zu{cJY@@D1O^P6o>;_b2)zEIKun_In+#aT8$ao9>OGq0Mqte54gw*}h^<2YloE%cRY zt*x>9a$*Zv;``$(&lv2~x+tD0%{w#+wNjcQDdP_?fUWjGfWa{TQ25DF7~*0ov05MK z4|gxHdjFHy4r|w$v9DGuJXQ40T#d{w@htQQahrux;}E)!uCn zOq2Uh3{Hg=%&`+&HD=vjIpyan(UbT*MrQ}ePqI6}nPI3~Zu69T%6i3na z_%e)ZSGcpRb)>r6-9t$k^P9l^{N9FX!JFH2kR-L}hXe1lJhbWNQ^=55UB zV*^pe#Z58j6={;#Fp6N_?2A%^z^Wa(#k-jgj$s7028QG&^|o!RI&H&Y4c;eDiq0Ap zT3}b~@YVVw`Qdgt+1&f@w#%obgr0Z?Ra-o?a~kvoZ&5?#`Wox0@K=~Fw%&`jJD$m+ z^eTXpCC~OcG!!#+$a|xWD`+I^9C`ni5@Wa($@-0G%#kGAT8+5h7 z?NlSn(&KA?G?3|xi1v?rjr7^Eq>A7{o!t@*9gluAg!1vdg?Ge>3V*Wq34i^LZ=nO|%gS$?T;ZOKe($sS z<6Qe8`!7sHadNjG6C?0>(*3De!B15?Z5mg8{G88%fmNN+U*ee#EM?<5$@Tg)vkS}FdToJwZN`>CR8NYu*n7eOz4t8 zBdcULJmZ{_h)O=W`yKn@cCsa^vo_LW-1JQFY~-M+phRrj_k6o8X9Lftuw&WiZ-1t+Ay+sH-Fc_o5e=2{R8{FI zXK#a_DQwR`&)`Kiq?(HMqPrLCXuuUZ-UuYjSC@S%Y*_S(&7qi$$joHJ^BH?et8of3 zWkmF#L+v%5VV#I8Y-F^Sz#*;FG0f|u;#pxifkc_5##HT;@I&^&12#5$Ej|2 zgIr;FIZmm-QtYL;s5fm|K670+7Na+9dQ!u3^1z|)pW%pq_y2@vndyr;ni^@d;#19n zlL({29-EV&5xTEEDr`5$Dgf1<^rK8Eea6kz1IwT`c={pTj@UQ_Hn_ ze|ApE-wssBiyTQ(|H6DOPLvfR{R0(&9Ac@8e_Wh2wbhJIz}b7`P<{5SveS@kV?qJm z2oh*|rgAW^YiLIsgwwS5Qj&oxJ*IpDTe4@}nBZ5YiB^Y-u_07zPjn#ep{XHQEIui$ z4<{Na4y`NaJoVl`+VY$g1)Nsc%4b5ciy5sOSQ%E}t`4N`MdVu(3p8eot<6~>c`YS{ z!9x`Z%4NrgRPf6oTbHY{=uTUoU(s_1Jj8|o5!u6IWm%<-33fR{bP4XUP1(oqerKDj zZ(N_Oj5%ALDf#|n1R$koc#=UPTXTj@I)}H#m)gF5Joei5Tnz=Kt+E{nAl7xM`FAE# 
znI>NG4Yn(LESZU)uPEWL;)>af>FWGKeuxUGR8H=xz?S~T*^k}T=U%i!qLgbM_vSFu zWPRw3ZzDhx#2mK+ey%s7^6|fxFSEnGK5b;wIpXsoQ+OMtU#u^BC`NR)gF@Pl=G5=S zv%B4nRS=W5Ujf6%6RE`k9z9?K&O{u{Cmc2i{}^!eJ4Mx@SHRc#nbg?30H$kGK%dk_ zhAZ9a0OKH6lVYxss_mTwqpo9BdzYQ@CbHX6#!PXQoi>oznA^e{o%LBZQU>>2)gLS{ zBT4dkFTj&c{)D}spBC4+^Lf*VPB?&wG=lm5AbKet1HOv(Va!CAz9PGeBO0`*`cHT? z+eZ$>GCa*b&K(SmENQK2DcR1Equ`Y3!4%M8P!p5RcwdgkoJ|E8OYeVy2& z7;h8SWj~oNRoHWXIre~kq7S&?bje8Jtb)uxMTg>iq}IVV(SA@Du(U#?iII_@pV z*><>2WFL8YUb3rp1Yc7FD}p2I)jOO=V$0x6;4XOA>~JdAHp23w3@lKMa$<;Cc-5}m z5j<3P$$EEPbm4)fTG(TD{f@$wjr=#eA%@@ok99a5-{`*vdKSpGdDD*I*f^YEHXH2b z9l@r|sIgmel%}>;q8tXrC?1O-H?5cK)_7L9QeA?e`L^nra(%ftfBTNOgKF{0#n}R^ z+~FgQ9)osoaDs79POcT`;*gDT(C_)fj0H9} z`Xsm!<%L_H$K?`}%0@iy_~^+mEk6WYdoiKr}wob;;>_CcZ$ z=NaYLzsr}i1;L-oWXg`hOgD3IO-mTMs4sx|Xu7{zK;+{L2Oq|`%Vq`uLUEovvywz8 z>q!|5ZxQBiTyBZboEu+3QbnLqQr>w9c8!9&ifM0w4+O{2U<-EyQ_hU)sRV>b<{v7*4!B z-b=3afT@1#sE~v@eK}_>p=RE!xRJ;ON%0ZOt507kg!%?I+sZ^21(R%bjPoSI=bGH6 zvx=Q?tz+*ue;;BGRS+t7g_x3lTUSxFPB=-*B*ZSNtM&25;brHE&EkN6)i&e^T{SDY zM>gheReg>INMut$)5E#5If;k30E}b-flg8}l`p|+ZS@vtA0C27*yi6Bh0=BheKtnG z9o53ad-}%UUr%Uv3PEXSH8tWDc_SU&t{j@Yi7obg3`1ou27Mt3fvBb>4Z?d*wb;tv zm3biKMYh{9{f7qS0@<6LHZ}9Bx0(;`u|Z=8;+fYwFCqPQ$e*EpYr<6GTDrsD?bN1b zJR^<-bPx}>SznSxCwgY*fsQ&ZY96eyW7Wc_Zuu3F>UP`@`8o6KM1ncH1_`)7`@@{y z?27qQ)uVR2buiJ8Vle=h+mk$PUK6hr&v0^O`TdYO^D9hAX(I#Y5d^(~gbe9Nn$pXa zme1yJQE`$(<6L$FYle;i9j>w~sfg`+`KCyh$y#t7+WWafXTtp9&wSpeDV-+F_Xm!n z(PPHihe?wp1SsXpKB@=^!wD2lF*N{`Db6EjKCULOL>Fbcb|EJ(lN=1C$`f65`W)!_ z{doH%kvt#5w@>rUOcX`cm-6^&T<=aG_9A?qteOIBmGJvuG zC<^TR7_DeRxmhS>YClvE6txo&?6)6xgwdrzK{$2p!0}Tw^tdF~i~bzXgu`QCU&7=s zj=g$zC)8li7N$tCJZ#3Ro;_O<=)~Gz!RlQ7Y?|9t%vDCJUc2Vm;96>0y&n9v(Wvlw zLO+byb=9*t{IHN*AI~yDin2o7@ND6D(ABN78=no{Y<6=!F2;6KwD(k_U$i= z6=CqR!SXGja&DIm$xiQ9Nw@|Z8lUL!R%)^%?B!w4hOO_4_$_hlN*Q;Eqj&hTg+*L2 z%|>|V#ZtIun2n4N-QUmkQ^ohE?4Va{R1QwP%Wi73jLE?z2++C8VvUVwMNL))JYeH| znBr5-9L7fzZA&SOr9qjHn4I6ba@jVGV=m+SH$Q+jxw_9*nZU(O6fP1;%%cAOys}ki}B$Y0{(9w6f_O&NqiXVt)uDdRPS#5$ud?wZJ>T^mY!)2f1 zI27OP#Eq2 
z>qu6$$~NWP&?d4^%G@?PJt-sc7AMfvO|~_<51ke#@-YVCPPT2{BhKtajj+9fRAIBy z(vwQaySaP1?R1zsJb&`8m;s7Bq@3sDOYH$kK5}?^;n_+i!J~DTC~= zPvb9x?TB}Xya7FSqLN_1z8C~ovJzw>E7{Q`RF839Wyc&UOi@%3^VzGs1%|m3{^s2+ z?DBs9jLEbR5Q)z2)UU8#L|9M@V`8UsJ-a(m= z&ve(z3G^sh>2v-uCCcr+oW0~bDYfu}&iP5$&>@Td=X{RN*!7fd_a?*zlrlYqN%%0E zkbw(F@sE6(co1~j$5oGqw30C1`|U!I1R^yX;bQ!fXIYX~}mCIV}$ACpdDvz3z zQret3$^HFPc7YieUFOe$JneqvP9krW{qn1cXz!^Qsp2JVXL2Z>+(|l+tKyk-5pwxZ zIm@ee#>vAeNKRx}w`+E~V(0+3(H^^Yr}IEX;)HC-Wm4|jH|)BdAt?`8^f$YHXYeSH zc3XR=-4LJYf>uc-;)M8E0mnD)EG$DugCC?f1x(0d#pnMx%X9P2!U8ll+burETy#AR zHmI7?9+X|)+pX2JeXzKy!`f%J<&?rtm&}SggEhk)kdvsx?#L!Jt*zK9;rs^2thHI* zvLOy6d>~4JdK>Bhwbj_Lot4?*Kf0TXrC_*!S=&aGC$3>5Mrd3L48=n&=Bv)n@ zo8X|dmdfciu>zI?ZtM`%hMMG4*CB49RZHy=XIc7GdL8=E^&5R`d~J7{y^~PSV`y(W7i`{%CJ# zv3Uvhlv3AtK~9|D+G4L>nDdaOnsFso-j|KYd$UXx3h&PaK)>xi8H;v0w<=Y^VXomy z+ZoK-;tKW3)1(kW`8X-f&I5^pVnpZk;liuUT|&_!u1~A4+Q=k}vog19qpuM569p4C z?K~?-brpNHrT#a**5j-tzH}~Cer+eLHKzmluy_7`Z_277;&#JSV@_?Ii zx^uDkZjQ#jmprxF7H`Z6fU8gpQy5;+XYuKhiE&%?rDrb^1F}7VD&7-Usy}w@EV982 zw(rjBL%)G=hPGXPmi%G9J?|^s?J4A6*co=n63`=)N4ZUVVw_;FL!}`U@Ah#<|8|V+ z&6S+;XX~v1E0&Z{L5nvzkl2Ekle3LnCk`detU&LobZUq5tWrpk9m!uaR8OEU7FE)d z^k$0C^nI+8=a3!E0m`7|M7LuV3f#F?W|HIC>xw35@8O4q>nw@ zK8>#B6DSRjQjLF>?Yrv=sHk8@Md>ZOqCK2agcrSYTnk3qYk{QYBJ}jNd2l14^k>m^J&M z8beuEeoPdX+z>*>Px%d|R(TVl@3Eg_hNxZc)9=)RO7grmCVxo{4ejj`LV^66I+Lt&6ZfZG)U;kV=&IfvaH{Ncj z-m82rIpG&QY{2)YMP=3*%$eHG4yXCpUe-UPMlK-Eu zK>^Bs0`oA4<*-B6*lpD`M7Q#&IAFKu?CYASeQ0-N6XIr0=%|&a&~wfQ`kFZhSy*Zt z5<`(nk=V?k33W?btqn``bYRrB;nA%dOiD*KqU!b~)DfiwPu#2{6XthcvQdGS9&Vd4 z(Thk#k&9v=#`>TQC7s^5Y8CdY}(?svNyPe3$N&YuE)nI#a z8q;_&C4ejnPf>&PwMADmEhk7xmj;_2pkeLi?kNA-jCdx_NkPy$|7JcHOr*+Sxd=Wh z)>r3i;7A-xtkvzX~ zlh&xdT47l^13fan@mW)>#WLiFr2!SLTT+iHRMc8>LVV5X3Wv~dt^Um{ zg0!u@daoD=@Bw9XJQv~(<(kQtffcLEfh*<@TOJct?ZFpqWxV1Xqh^?6a{hvOwkA6+ zUaHcf9kI3ftY~pykSX zvGN|WP0wYtDK}p!K2n&0&51GPF=<|ATPh$#HT${Sj@Z_~w>pX(TCA#gu5C%IxC(XH z_UhZcMQMe8$9DMA{O;i#2Y?NvMEsBLSu!_~am zWaU0S3OK5mawOlt_b8ZJSBtyh_Br*Ct+&OFCZ$a2CQ?ZE|FQT^7C!G$8S&*=d;GZ~ 
zuoSh2=Z|A~>_l#iMtoA5tUu2hR~b=FEa%lZ`y*&nD<-C*6E|F20d%lN5{Kk_h+QsFS(y>WdcheTN8%;`eqw zpPn1`9*tY1Fmo%S8!tEF59oyE>zUws=SL;8~J<{ zwZDCmu(GYCBv7RIr~V9okW6>7%07#B&Y3I4$IJG)PoSZd#HC+kKkUz=B6%DUzD!E| zg9^rf6)lRd87&mp|8?Nq?={@+irr0kzZ9)y5R=!9aQIF2OahIM@ommgRRsu1`p(gU zGeM!E@ADIR1wubW!PpND7WW;?Y(FNDWcsPMp8_DIVv~S;9Le0d)}MW{XuFTwFF9zb z>x1EHmn&`LQTU==6~AzjMex3QS7ADu>k$dB*;S6ByBFHEyRN*@rByA4ESQ?>{A(OU zyQx)u{jSjLv?~({nw?#RQ|crQi7aL}?kX3~LP+MOU2)dVNAJwJM}>@C!NQFG?I^p& zTaNziI2*Lf*(+`vcBF@M-Wty&nxhQXzjoXIMcw;{cwKM%{)>o+h=_=Yi0FDfUGwYq zsJY$db^Fbq+dO6-b-Ozrx5xSEjEysM=ZC4g!t3rvH$uD-T`99^9LI4S$8j=$WfnF` zL_|bHL_|bHL_|bHM8x;;e66+Kqvw0?<@@hT0g6H6mbW4 zuOeXFme1bWRlK|uaesUDE$+r(RSbGUj@?wV#U{pNtJ{jsV%B=p?#N~}%wSUVa~QW7 zPnSzFL&sQCr%mw*+cNf9VOeS3*@EyYL6IInmxFgHm?M=Ek$)>JF|HaW^lfsCK%UQ+_(Q)jiLJ z(6fbfO_9 zg@avNau(W3{}9&lds}rmsi6vy5uZ>bTN7`J*g=;HTyn{3t$*LGycQ1xdL)jVvvp2@ zSounX&-%m#I95tI-E=o3B|g;L)g+&J+vp!r<5w19tBsEehb`ltw0fUPH~PNDcwNkG zyjGN7Q+~qfqw->>5GOb1*qpVfxsc5cSW6NV=_;+YYCuc^_~puK?Nvvz%0twMF&(+d zN^T=mGl3V0x8)+qiReeq)=S<==N6^Y0-;_(1Nd`Y1KqGZ2^KjBLS?`XpW`(%Dw z6apf$bM{m`a~C+L-QE6-^0r-Ds8<2+pzNu`K$O^adpg&%9*t}9d4DF+P`E0)rtnH7 zf_OIGXf#E=sv41Gp>wu3zmXD?)l%HPb_=yEa)*?lF_`eaB$u|jI%4tm$CtX4A7>$? 
zy7Vy1p2VCCMv!W6%&wm{)Llhmb;A1MYd>PNjK%EEq4geLU3A@SwesoDrOZeJ|?5M$`7q}#2hF_8o}mNSQG=sGUHs*BZJilUB{ZtO3;JNW7!?5kWl z?sCYhU;8U*W$c@nqn{AY1$2H6*|z~RQ7LaKGUdCRWoKJmvwdGdB>I&te#Cyr7Vv}Z z_G7ltJ769_HS$vhGx67aHN-jnbIuiqAQWi!i$k|Xdc0r#-_4uB!Ee>x+t$>KUzpkR zMd0vsARzt<|4q_KQXKHXmCuK&hV_J%U-i7p5;CXA)GP0qtDi4I2}y12ns_aBr`*fM z2(NwqV)5%KiZGT^s%AgVuFJ7HxsxL7T>pF#tBCbixc?2$$3aR#uTuHw#^>W6u<(-j z3%e0I`n7|J=Sk`jA}`$=1nP*Jw2O|s6S zL^A$v5YLhvyq0h$WV)pe8u3nYAy}4;J5`F)IKeJzXt#9^5tj#CL2P}6)Z(^Y3PQRd zupyADI=iE`(dQ*DQ+%OoeGqtYg90r|?u+__t)&o6W59u19S4&B9@9i6q|z@UAC)-gi!(^gDa% zaw7Fe#4*xI*>2yC+qc`E{P<6>s;B+qRr9Ii`%I36epUul<@W;g~=mh4&?}onEG%6MBL+9`cWpzkqQNh zo|bdit2UbL5PnVu8$ID@BXSQ9o1`bn(U_K=56@7pqu6(G>0wpRCN{QU`PZ&LSK(ER zD>bmlcy_)5LE33o(Z7)(Ks%?wzp5l%s3zOp(r9Bovjr_}xW8P?E)Y=iZu54aTDFXS zdnXSO8UE0ix?1+#m{mItpzme(guxTGH9kkh*nbevE1${%_m%X&_F)XII7!K(uj8-( zQT2@EIxONjLTf+H8KDDkiFnjL$<_G_k!PP)A1Ui3B!`mVGbgV{)>F?k@uzA7EI*l& zLbm)OdoCGdTps!|_iPg;#bTQO%BQ!$FOFYVgSihqjt}-tj3CLc#7z|&pYEUECNdgZ zq;~x-LDAhFIvP*k{e4ad89p^@!{p5Re>2muy=|$_KyllV3 z*8_MY%avddY3JAMUS(T;i}jTX=bYI-mo~&f<%UF&h1erk>?>kQGl%Du`&^3g+kx-M ztKymGoGdm^yE;0P;#ATO#fl3LIBM5;Xa8{LRJ(RxsB9=z)`t1-x)_IzcKtXotdh7# zB+0iM_7&lVswt`>!#=rjU#gTS5okC0FBH16uid;aHWo6N96riA-V*Q`fmT(aQth4!c@AJ>?q|LwsXI}^`By*h@Uq|4TSvVXUw0fo(!=w07Q#ij(d&Pb=moCg^96`Qs%q%ZOn*3@$m z?6Mh-%EsnxHq-w({jR%gR?J+*(ND3{PC~P@_l20crDhXG(m7QVpOM1!=T`V=C~ijc za={@;$*SiRN)@n*-2-d+>ld#D@g{{JPvnVi#TH#YKMWz6>2Ars2pF!391}7EkxqE2 zkF0PX1PIJH$YskM7bK*?;%&a25kwvfaV;cI;bk4V@xu^YUF9QM8BRojUw$S;eUx<>9zpiEnAPY-^&Wt~l9lOTf_Qa7Mw{ zUbP**w^|x7KH9_4Ae1rq=%2AiD>@kAV7t^QVrR4#)yo-F=D1xsVoS+^{aAvCk)c^C z8Cb&O(V;%m-Oumo$05@vE*V}@iDcm?bNYn<<=?3Al@naRp7NpE(dOuklPdnjeyJ7str;35ycju<%--%8gsWTQ!Z z;n02}2S(%+DYII3vU=v`7iIf8oy*?d-GYdF))A}ACjK_QF!=7SvGcick+nM-NPY4~ z0Gs?rMl$V~!*(G*yKj+=W%mf^9h>Z85;?krW_vqlfSKx;y|XXwd39tRCHVs$$i85I zT50cjm$DSw`!Neq!mH4lEOH-Q28{d&&Vo6GNPn1k;R-F?!$=RzltpK=B z52c&RAR1r@c-y&c76bOiFu&-hY za``JFCsgrucDA`~yM2=@C4~i)puDo*#xv#fJHxZ%cy@f39dUh=5<{5)zE7~X7oVNapl** 
z33?8Epni*Az0{50W%hy#KVs+P$ANEUYT_$iDB=a0;s)+*SH9r#PtnwZJOvzG^+I91 zwe5EG3vrfiBGR|ZuBjTIWt$WpX4mF3$2ce&uQigq-t298#!0;cseciK==b)KHBC@#zXMDN)&sIA)V$ z0>unguyYQcCZSwj4wQTM37{FBX;WhYzK2166{s^UhsU3r(1jT={#i%Wwoar;j!kpjMw<)PktwX#L)p322$?`TtzQ!cIcCM+o z$2J8VEFH{&`vn5(=G+;$(H*ju!0@FaHd=fH&er(WU06t_#97u}JtK`TSXaWPjvSLJ zU!7ZmR7_h|HqtzCH zpJ}7*%qBJUn~7B9Y}plKqsd{uxLvS*k2!qH5lnSl+2euuxG1omx<~7p;XC>XU&k8k z9rom9a7&1gW$bkoH4V7`@O#@Gjfx~u$WyWjVam3r0!N~iOBzr6x=I^{zfLsJbwxRQ zspV(S=E5&!-|wveAcr?1qSxb7Le%FH%^aAD4_CC)z9bWB+dvKbtKN^7d>#^pdvbr0 z+q>82FatY&U$EtF>#tx|sAQ4YZQz9}CDE2G1Yo2e^I)`<*-vGQ*$y^TeXkIDOk7Cl z!xbQYr;sc*l8m!tY3#yX+(rZQ()pc;Pq^$CyO9pGlTIL8vG<^BIvp((+@Vl81;d>6 z9#q%S&Q*gt-%(uB`Ae26bq;<7eIwo!Pj@I=7Dj(_A^RzwnNSUTW6Z}P;ZD6>bfi<0 zs`$yh?aS9xQgy%MlcWH0RW|6|+r?`XDJ^aa_&q zBqoebX1+I9`?5@+w6-9 z8*&Gi%yky(l-2IbT(%|LC()nQwb@rmu#+cGvajP589uHPRlv!<3TI*1W=L-d9V2G{r-qc_elU1*^3Gj>z-=E6d5WaUM_ zIRPWbJzd?hzeq7NW=*qO_ZPW=6mS^FZ;K9`o%&Jl8|QZaVm|nv5OC;}8McZ3qNaPB z-LXH!9@5^I7|BTiqaTH`boSfic&4nq9*=Xvro=P1^y^?|Bqon}R?W$W@8-VVA-o@PDQ$1_>ExZiM|5bQCpj?zSfX=jwl6ZkrP|%ZZ$536i4% zRBMbLw9d|X+1e{8dr?igR=uN|Uk9vCUVL3aLE<7MNQP6@8T3{sibtkSoM_u}N1?7( zN`>*t?>nN^Bab{{4?959XU?=oD~g(CJNK9C(O*{CuAnUjE3&spQ`;emQ`uvQBJ`}d zGIZJFF^9gxC^{kk0PgZk88iev+XZJr9&om>+sU*9$-SJFrrs9>SkX`pA zFTiCNE!)#o6GdJN9>v$@rj7#!xLsBBkx?5$Af1$R}v*tx}X{yl&lw0#w* za(6DWy6ul=S~lImrE}?VuojY2Nuf-=x#IVso9)X!Ai4AO(Fd9LSI<1ry_m~DK7%Qq z2E&T&9L#4$yQh%mQ5%Y9kyTh*%!Zu|mp8O-!mK;uztI0FZN_M{mm#~T^IQX!pZweGZi8^I|CA2C&P?EQE_I-9J z555vG{zD+pmDy6i%QwuurL^OJ+#f*(_z;t1oj|&E9y!k&rJ57ddd04MF~p+c zmwwfY>9!=?1N*}r7r4{$o& z@?z*sR^bhDYj(Q&A%?ikf5{1o98$MOht6JoH|tm&vn0W3t2XJyqE~uz50>*iaFfaZ3zQcjz2Icnro=L(+wf`B_fvD+dt0%~OpCGPiXbDg z{6Cx<&v-GEb2z-SMl=00(0mj&IxAl;Y{KYgvtKNd;Um9WZ*%-jaiJ#P|6E5DY2?rg z%<~^ylG^I*o`eIsCYfM`C%2}K7oS|@eQ!bCxg@EM05%-}1NiSQeR14mNlXn949G;w zvLSkF8Y1W&wdK_WPT^Xnk}|6K9HtE!3sF`*s2%HLEO}0YXUegU#rN_owT7^ zrLB$cMD0||BsUG==dmchCw&nmot_Lvvo3){`vN<`LeOXH6P(rUiq7BQ>x>xNAQvXv z7_eiSX3$!1++2$N5{Xq&F;gjKzs5vuXC0cTrW~)_;ar~HoGmEfP;V_UKM`3(5QAMA 
z$y&W#TVtc-i}w5q54;xZsOC=bM%qim)m1%{4`$Ih+1A`EolPCKEgA^XuNI?EukFam z-wQ1@*~1BMtTHzjd(;u)6^>yIJ9E2m!P-@gsXWMjkA2MFkM$s;Kknp z@!-h>h1&qeXuNBV*i-QguM4z#<*)62F)k=X#gbvO-u6TzF$Ur&naO#3+Nl&Hl|AEQ z--oM;JsVA3Z&|3o_9j~*Mo4wqQDZ&lU-Q8B#TU}lk#?gTx z$+$CIfpZ>vsrd1*ja1Jh&-evvwA$awVxjGlv6d&Ib&2NX^kHk;$t0PsCK5E~T2iv( zY?8`?`|n@9#?CoeM0PqVjV6S}D^y57Xm7;Go(9kLX9tl5iNLB_E5D*R^ido0!Af0< zi#gaPvYL`o&|ej{`A&Q)W_y+?p5*TNZekp*hX?t;H{J@i>Zj!UPEBoFYdTc~xWhhh zqAcnRg0FIm)wglU`6xOF^NCc1_HhNZdpR5eU!Nq3FsXf-VBi_)=>R`V0>;fAF{RT! zcXFG4*WLC-g&mG)KzzL@vt@ewGS^@^C2qd*E=njAidSN;63!(u=zWu?eP$)XCL<>vjpLyV<^RDo4r^lv{_^HX{vcP{!Vo)yxD@^nZ3Iah~kFu9_C z@lHzCiB9{qIw(gtC`kuck)O2n<+{SEokv=-o;208)k{LSqUWMAb6{QTv@8AJIB|5@ zRXriSENp9Sw5xku{*eGFR}(J(c1_R!TmJFu8renI7T3l%VYDfMGS^l8a0dq>2{+fr zGvyP1JvPJ*J>`IHs@ZBc<|l9;u{)*4xvA<}J5Cm{VY|5}gc~y2qb_A?w3-dUY&8XeOl9I++}lm+%#ElbE|GuO>